[[package]]
name = "socket2"
-version = "0.4.0"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2"
+checksum = "765f090f0e423d2b55843402a07915add955e7d60657db13707a159727326cad"
dependencies = [
"libc",
"winapi",
- name: Install MinGW toolchain and wine
if: matrix.os == 'ubuntu-latest' && matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
run: |
+ sudo apt-get update
sudo apt-get install -y gcc-mingw-w64-x86-64 wine-stable
rustup target add x86_64-pc-windows-gnu
- name: Install AArch64 toolchain and qemu
if: matrix.os == 'ubuntu-latest' && matrix.env.TARGET_TRIPLE == 'aarch64-unknown-linux-gnu'
run: |
+ sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu qemu-user
- name: Prepare dependencies
/rand
/regex
/simple-raytracer
+/stdsimd
[[package]]
name = "anyhow"
-version = "1.0.38"
+version = "1.0.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1"
+checksum = "595d3cfa7a60d4555cb5067b99f07142a08ea778de5cf993f7b75c7d8fabc486"
[[package]]
name = "ar"
[[package]]
name = "cranelift-bforest"
version = "0.75.0"
-source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
dependencies = [
"cranelift-entity",
]
[[package]]
name = "cranelift-codegen"
version = "0.75.0"
-source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
dependencies = [
"cranelift-bforest",
"cranelift-codegen-meta",
[[package]]
name = "cranelift-codegen-meta"
version = "0.75.0"
-source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
dependencies = [
"cranelift-codegen-shared",
"cranelift-entity",
[[package]]
name = "cranelift-codegen-shared"
version = "0.75.0"
-source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
[[package]]
name = "cranelift-entity"
version = "0.75.0"
-source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
[[package]]
name = "cranelift-frontend"
version = "0.75.0"
-source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
dependencies = [
"cranelift-codegen",
"log",
[[package]]
name = "cranelift-jit"
version = "0.75.0"
-source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
dependencies = [
"anyhow",
"cranelift-codegen",
[[package]]
name = "cranelift-module"
version = "0.75.0"
-source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
dependencies = [
"anyhow",
"cranelift-codegen",
[[package]]
name = "cranelift-native"
version = "0.75.0"
-source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
dependencies = [
"cranelift-codegen",
"libc",
[[package]]
name = "cranelift-object"
version = "0.75.0"
-source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
dependencies = [
"anyhow",
"cranelift-codegen",
[[package]]
name = "gimli"
-version = "0.24.0"
+version = "0.25.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189"
+checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7"
dependencies = [
"indexmap",
]
[[package]]
name = "hashbrown"
-version = "0.9.1"
+version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
+checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
[[package]]
name = "indexmap"
-version = "1.6.1"
+version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b"
+checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5"
dependencies = [
"autocfg",
"hashbrown",
[[package]]
name = "libc"
-version = "0.2.97"
+version = "0.2.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6"
+checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790"
[[package]]
name = "libloading"
[[package]]
name = "object"
-version = "0.25.3"
+version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a38f2be3697a57b4060074ff41b44c16870d916ad7877c17696e063257482bc7"
+checksum = "c55827317fb4c08822499848a14237d2874d6f139828893017237e7ab93eb386"
dependencies = [
"crc32fast",
"indexmap",
[[package]]
name = "target-lexicon"
-version = "0.12.0"
+version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64ae3b39281e4b14b8123bdbaddd472b7dfe215e444181f2f9d2443c2444f834"
+checksum = "b0652da4c4121005e9ed22b79f6c5f2d9e2752906b53a33e9490489ba421a6fb"
[[package]]
name = "winapi"
[dependencies]
# These have to be in sync with each other
-cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main", features = ["unwind", "all-arch"] }
-cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main" }
-cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main" }
-cranelift-native = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main" }
-cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main", optional = true }
-cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main" }
+cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime.git", features = ["unwind", "all-arch"] }
+cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime.git" }
+cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime.git" }
+cranelift-native = { git = "https://github.com/bytecodealliance/wasmtime.git" }
+cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime.git", optional = true }
+cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime.git" }
target-lexicon = "0.12.0"
-gimli = { version = "0.24.0", default-features = false, features = ["write"]}
-object = { version = "0.25.0", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] }
+gimli = { version = "0.25.0", default-features = false, features = ["write"]}
+object = { version = "0.26.0", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] }
ar = { git = "https://github.com/bjorn3/rust-ar.git", branch = "do_not_remove_cg_clif_ranlib" }
indexmap = "1.0.2"
#gimli = { path = "../" }
[features]
-default = ["jit", "inline_asm"]
+# Enable features not ready to be enabled when compiling as part of rustc
+unstable-features = ["jit", "inline_asm"]
jit = ["cranelift-jit", "libloading"]
inline_asm = []
[[package]]
name = "cc"
-version = "1.0.68"
+version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787"
+checksum = "e70cc2f62c6ce1868963827bd677764c62d07c3d9a3e1fb1177ee1a9ab199eb2"
[[package]]
name = "cfg-if"
[[package]]
name = "libc"
-version = "0.2.97"
+version = "0.2.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6"
+checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790"
dependencies = [
"rustc-std-workspace-core",
]
"test",
]
-[[package]]
-name = "term"
-version = "0.0.0"
-dependencies = [
- "core",
- "std",
-]
-
[[package]]
name = "test"
version = "0.0.0"
"panic_unwind",
"proc_macro",
"std",
- "term",
]
[[package]]
pub(crate) fn build_backend(channel: &str, host_triple: &str) -> PathBuf {
let mut cmd = Command::new("cargo");
- cmd.arg("build").arg("--target").arg(host_triple);
+ cmd.arg("build").arg("--target").arg(host_triple).arg("--features").arg("unstable-features");
match channel {
"debug" => {}
{
let file = file.unwrap().path();
let file_name_str = file.file_name().unwrap().to_str().unwrap();
- if file_name_str.contains("rustc_")
+ if (file_name_str.contains("rustc_")
+ && !file_name_str.contains("rustc_std_workspace_")
+ && !file_name_str.contains("rustc_demangle"))
|| file_name_str.contains("chalk")
|| file_name_str.contains("tracing")
|| file_name_str.contains("regex")
"341f207c1071f7290e3f228c710817c280c8dca1",
);
+ clone_repo(
+ "stdsimd",
+ "https://github.com/rust-lang/stdsimd",
+ "be96995d8ddec03fac9a0caf4d4c51c7fbc33507",
+ );
+ apply_patches("stdsimd", Path::new("stdsimd"));
+
clone_repo(
"simple-raytracer",
"https://github.com/ebobby/simple-raytracer",
copy_dir_recursively(&sysroot_src_orig.join("library"), &sysroot_src.join("library"));
let rustc_version = get_rustc_version();
- fs::write(
- Path::new("build_sysroot").join("rustc_version"),
- &rustc_version,
- )
- .unwrap();
+ fs::write(Path::new("build_sysroot").join("rustc_version"), &rustc_version).unwrap();
eprintln!("[GIT] init");
let mut git_init_cmd = Command::new("git");
rm -rf build_sysroot/{sysroot_src/,target/,compiler-builtins/,rustc_version}
rm -rf target/ build/ perf.data{,.old}
-rm -rf rand/ regex/ simple-raytracer/
+rm -rf rand/ regex/ simple-raytracer/ stdsimd/
or
```bash
-$ $cg_clif_dir/build/bin/cg_clif -Cllvm-args=mode=jit -Cprefer-dynamic my_crate.rs
+$ $cg_clif_dir/build/bin/cg_clif -Zunstable-options -Cllvm-args=mode=jit -Cprefer-dynamic my_crate.rs
```
There is also an experimental lazy jit mode. In this mode functions are only compiled once they are first called.
```bash
function jit_naked() {
- echo "$@" | $cg_clif_dir/build/bin/cg_clif - -Cllvm-args=mode=jit -Cprefer-dynamic
+ echo "$@" | $cg_clif_dir/build/bin/cg_clif - -Zunstable-features -Cllvm-args=mode=jit -Cprefer-dynamic
}
function jit() {
--- /dev/null
+From 6bfce5dc2cbf834c74dbccb7538adc08c6eb57e7 Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sun, 25 Jul 2021 18:39:31 +0200
+Subject: [PATCH] Disable unsupported tests
+
+---
+ crates/core_simd/src/array.rs | 2 ++
+ crates/core_simd/src/lib.rs | 2 +-
+ crates/core_simd/src/math.rs | 4 ++++
+ crates/core_simd/tests/masks.rs | 12 ------------
+ crates/core_simd/tests/ops_macros.rs | 6 ++++++
+ crates/core_simd/tests/round.rs | 2 ++
+ 6 files changed, 15 insertions(+), 13 deletions(-)
+
+diff --git a/crates/core_simd/src/array.rs b/crates/core_simd/src/array.rs
+index 25c5309..2b3d819 100644
+--- a/crates/core_simd/src/array.rs
++++ b/crates/core_simd/src/array.rs
+@@ -22,6 +22,7 @@ where
+ #[must_use]
+ fn splat(val: Self::Scalar) -> Self;
+
++ /*
+ /// SIMD gather: construct a SIMD vector by reading from a slice, using potentially discontiguous indices.
+ /// If an index is out of bounds, that lane instead selects the value from the "or" vector.
+ /// ```
+@@ -150,6 +151,7 @@ where
+ // Cleared ☢️ *mut T Zone
+ }
+ }
++ */
+ }
+
+ macro_rules! impl_simdarray_for {
+diff --git a/crates/core_simd/src/lib.rs b/crates/core_simd/src/lib.rs
+index a64904d..299eb11 100644
+--- a/crates/core_simd/src/lib.rs
++++ b/crates/core_simd/src/lib.rs
+@@ -1,7 +1,7 @@
+ #![no_std]
+ #![allow(incomplete_features)]
+ #![feature(
+- const_generics,
++ const_generics,
+ platform_intrinsics,
+ repr_simd,
+ simd_ffi,
+diff --git a/crates/core_simd/src/math.rs b/crates/core_simd/src/math.rs
+index 7290a28..e394730 100644
+--- a/crates/core_simd/src/math.rs
++++ b/crates/core_simd/src/math.rs
+@@ -2,6 +2,7 @@ macro_rules! impl_uint_arith {
+ ($(($name:ident, $n:ident)),+) => {
+ $( impl<const LANES: usize> $name<LANES> where Self: crate::LanesAtMost32 {
+
++ /*
+ /// Lanewise saturating add.
+ ///
+ /// # Examples
+@@ -38,6 +39,7 @@ macro_rules! impl_uint_arith {
+ pub fn saturating_sub(self, second: Self) -> Self {
+ unsafe { crate::intrinsics::simd_saturating_sub(self, second) }
+ }
++ */
+ })+
+ }
+ }
+@@ -46,6 +48,7 @@ macro_rules! impl_int_arith {
+ ($(($name:ident, $n:ident)),+) => {
+ $( impl<const LANES: usize> $name<LANES> where Self: crate::LanesAtMost32 {
+
++ /*
+ /// Lanewise saturating add.
+ ///
+ /// # Examples
+@@ -141,6 +144,7 @@ macro_rules! impl_int_arith {
+ pub fn saturating_neg(self) -> Self {
+ Self::splat(0).saturating_sub(self)
+ }
++ */
+ })+
+ }
+ }
+diff --git a/crates/core_simd/tests/masks.rs b/crates/core_simd/tests/masks.rs
+index 61d8e44..2bccae2 100644
+--- a/crates/core_simd/tests/masks.rs
++++ b/crates/core_simd/tests/masks.rs
+@@ -67,18 +67,6 @@ macro_rules! test_mask_api {
+ assert_eq!(int.to_array(), [-1, 0, 0, -1, 0, 0, -1, 0]);
+ assert_eq!(core_simd::$name::<8>::from_int(int), mask);
+ }
+-
+- #[test]
+- fn roundtrip_bitmask_conversion() {
+- let values = [
+- true, false, false, true, false, false, true, false,
+- true, true, false, false, false, false, false, true,
+- ];
+- let mask = core_simd::$name::<16>::from_array(values);
+- let bitmask = mask.to_bitmask();
+- assert_eq!(bitmask, [0b01001001, 0b10000011]);
+- assert_eq!(core_simd::$name::<16>::from_bitmask(bitmask), mask);
+- }
+ }
+ }
+ }
+diff --git a/crates/core_simd/tests/ops_macros.rs b/crates/core_simd/tests/ops_macros.rs
+index cb39e73..fc0ebe1 100644
+--- a/crates/core_simd/tests/ops_macros.rs
++++ b/crates/core_simd/tests/ops_macros.rs
+@@ -435,6 +435,7 @@ macro_rules! impl_float_tests {
+ )
+ }
+
++ /*
+ fn mul_add<const LANES: usize>() {
+ test_helpers::test_ternary_elementwise(
+ &Vector::<LANES>::mul_add,
+@@ -442,6 +443,7 @@ macro_rules! impl_float_tests {
+ &|_, _, _| true,
+ )
+ }
++ */
+
+ fn sqrt<const LANES: usize>() {
+ test_helpers::test_unary_elementwise(
+@@ -581,6 +585,7 @@ macro_rules! impl_float_tests {
+ });
+ }
+
++ /*
+ fn horizontal_max<const LANES: usize>() {
+ test_helpers::test_1(&|x| {
+ let vmax = Vector::<LANES>::from_array(x).horizontal_max();
+@@ -604,6 +609,7 @@ macro_rules! impl_float_tests {
+ Ok(())
+ });
+ }
++ */
+ }
+ }
+ }
+diff --git a/crates/core_simd/tests/round.rs b/crates/core_simd/tests/round.rs
+index 37044a7..4cdc6b7 100644
+--- a/crates/core_simd/tests/round.rs
++++ b/crates/core_simd/tests/round.rs
+@@ -25,6 +25,7 @@ macro_rules! float_rounding_test {
+ )
+ }
+
++ /*
+ fn round<const LANES: usize>() {
+ test_helpers::test_unary_elementwise(
+ &Vector::<LANES>::round,
+@@ -32,6 +33,7 @@ macro_rules! float_rounding_test {
+ &|_| true,
+ )
+ }
++ */
+
+ fn trunc<const LANES: usize>() {
+ test_helpers::test_unary_elementwise(
+--
+2.26.2.7.g19db9cfb68
+
#[test]
#[allow(warnings)]
// Have a symbol for the test below. It doesn’t need to be an actual variadic function, match the
-@@ -289,6 +290,7 @@ fn write_unaligned_drop() {
- }
- DROPS.with(|d| assert_eq!(*d.borrow(), [0]));
+@@ -277,6 +277,7 @@ pub fn test_variadic_fnptr() {
+ let mut s = SipHasher::new();
+ assert_eq!(p.hash(&mut s), q.hash(&mut s));
}
+*/
#[test]
- fn align_offset_zst() {
+ fn write_unaligned_drop() {
diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
index 6609bc3..241b497 100644
--- a/library/core/tests/slice.rs
#[test]
fn cell_allows_array_cycle() {
-diff --git a/library/core/tests/num/mod.rs b/library/core/tests/num/mod.rs
-index a17c094..5bb11d2 100644
---- a/library/core/tests/num/mod.rs
-+++ b/library/core/tests/num/mod.rs
-@@ -651,11 +651,12 @@ macro_rules! test_float {
- assert_eq!((9.0 as $fty).min($neginf), $neginf);
- assert_eq!(($neginf as $fty).min(-9.0), $neginf);
- assert_eq!((-9.0 as $fty).min($neginf), $neginf);
-- assert_eq!(($nan as $fty).min(9.0), 9.0);
-- assert_eq!(($nan as $fty).min(-9.0), -9.0);
-- assert_eq!((9.0 as $fty).min($nan), 9.0);
-- assert_eq!((-9.0 as $fty).min($nan), -9.0);
-- assert!(($nan as $fty).min($nan).is_nan());
-+ // Cranelift fmin has NaN propagation
-+ //assert_eq!(($nan as $fty).min(9.0), 9.0);
-+ //assert_eq!(($nan as $fty).min(-9.0), -9.0);
-+ //assert_eq!((9.0 as $fty).min($nan), 9.0);
-+ //assert_eq!((-9.0 as $fty).min($nan), -9.0);
-+ //assert!(($nan as $fty).min($nan).is_nan());
- }
- #[test]
- fn max() {
-@@ -673,11 +674,12 @@ macro_rules! test_float {
- assert_eq!((9.0 as $fty).max($neginf), 9.0);
- assert_eq!(($neginf as $fty).max(-9.0), -9.0);
- assert_eq!((-9.0 as $fty).max($neginf), -9.0);
-- assert_eq!(($nan as $fty).max(9.0), 9.0);
-- assert_eq!(($nan as $fty).max(-9.0), -9.0);
-- assert_eq!((9.0 as $fty).max($nan), 9.0);
-- assert_eq!((-9.0 as $fty).max($nan), -9.0);
-- assert!(($nan as $fty).max($nan).is_nan());
-+ // Cranelift fmax has NaN propagation
-+ //assert_eq!(($nan as $fty).max(9.0), 9.0);
-+ //assert_eq!(($nan as $fty).max(-9.0), -9.0);
-+ //assert_eq!((9.0 as $fty).max($nan), 9.0);
-+ //assert_eq!((-9.0 as $fty).max($nan), -9.0);
-+ //assert!(($nan as $fty).max($nan).is_nan());
- }
- #[test]
- fn rem_euclid() {
--
2.21.0 (Apple Git-122)
-From 894e07dfec2624ba539129b1c1d63e1d7d812bda Mon Sep 17 00:00:00 2001
+From 6a4e6f5dc8c8a529a822eb9b57f9e57519595439 Mon Sep 17 00:00:00 2001
From: bjorn3 <bjorn3@users.noreply.github.com>
Date: Thu, 18 Feb 2021 18:45:28 +0100
Subject: [PATCH] Disable 128bit atomic operations
Cranelift doesn't support them yet
---
- library/core/src/sync/atomic.rs | 38 ---------------------------------
- library/core/tests/atomic.rs | 4 ----
- library/std/src/panic.rs | 6 ------
+ library/core/src/panic/unwind_safe.rs | 6 -----
+ library/core/src/sync/atomic.rs | 38 ---------------------------
+ library/core/tests/atomic.rs | 4 ---
3 files changed, 48 deletions(-)
+diff --git a/library/core/src/panic/unwind_safe.rs b/library/core/src/panic/unwind_safe.rs
+index 092b7cf..158cf71 100644
+--- a/library/core/src/panic/unwind_safe.rs
++++ b/library/core/src/panic/unwind_safe.rs
+@@ -216,9 +216,6 @@ impl RefUnwindSafe for crate::sync::atomic::AtomicI32 {}
+ #[cfg(target_has_atomic_load_store = "64")]
+ #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+ impl RefUnwindSafe for crate::sync::atomic::AtomicI64 {}
+-#[cfg(target_has_atomic_load_store = "128")]
+-#[unstable(feature = "integer_atomics", issue = "32976")]
+-impl RefUnwindSafe for crate::sync::atomic::AtomicI128 {}
+
+ #[cfg(target_has_atomic_load_store = "ptr")]
+ #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
+@@ -235,9 +232,6 @@ impl RefUnwindSafe for crate::sync::atomic::AtomicU32 {}
+ #[cfg(target_has_atomic_load_store = "64")]
+ #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+ impl RefUnwindSafe for crate::sync::atomic::AtomicU64 {}
+-#[cfg(target_has_atomic_load_store = "128")]
+-#[unstable(feature = "integer_atomics", issue = "32976")]
+-impl RefUnwindSafe for crate::sync::atomic::AtomicU128 {}
+
+ #[cfg(target_has_atomic_load_store = "8")]
+ #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
-index 81c9e1d..65c9503 100644
+index 0194c58..25a0038 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
-@@ -2228,44 +2228,6 @@ atomic_int! {
+@@ -2229,44 +2229,6 @@ atomic_int! {
"AtomicU64::new(0)",
u64 AtomicU64 ATOMIC_U64_INIT
}
macro_rules! atomic_int_ptr_sized {
( $($target_pointer_width:literal $align:literal)* ) => { $(
diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
-index 2d1e449..cb6da5d 100644
+index b735957..ea728b6 100644
--- a/library/core/tests/atomic.rs
+++ b/library/core/tests/atomic.rs
-@@ -145,10 +145,6 @@ fn atomic_alignment() {
+@@ -185,10 +185,6 @@ fn atomic_alignment() {
assert_eq!(align_of::<AtomicU64>(), size_of::<AtomicU64>());
#[cfg(target_has_atomic = "64")]
assert_eq!(align_of::<AtomicI64>(), size_of::<AtomicI64>());
#[cfg(target_has_atomic = "ptr")]
assert_eq!(align_of::<AtomicUsize>(), size_of::<AtomicUsize>());
#[cfg(target_has_atomic = "ptr")]
-diff --git a/library/std/src/panic.rs b/library/std/src/panic.rs
-index 89a822a..779fd88 100644
---- a/library/std/src/panic.rs
-+++ b/library/std/src/panic.rs
-@@ -279,9 +279,6 @@ impl RefUnwindSafe for atomic::AtomicI32 {}
- #[cfg(target_has_atomic_load_store = "64")]
- #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
- impl RefUnwindSafe for atomic::AtomicI64 {}
--#[cfg(target_has_atomic_load_store = "128")]
--#[unstable(feature = "integer_atomics", issue = "32976")]
--impl RefUnwindSafe for atomic::AtomicI128 {}
-
- #[cfg(target_has_atomic_load_store = "ptr")]
- #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
-@@ -298,9 +295,6 @@ impl RefUnwindSafe for atomic::AtomicU32 {}
- #[cfg(target_has_atomic_load_store = "64")]
- #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
- impl RefUnwindSafe for atomic::AtomicU64 {}
--#[cfg(target_has_atomic_load_store = "128")]
--#[unstable(feature = "integer_atomics", issue = "32976")]
--impl RefUnwindSafe for atomic::AtomicU128 {}
-
- #[cfg(target_has_atomic_load_store = "8")]
- #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
--
2.26.2.7.g19db9cfb68
[toolchain]
-channel = "nightly-2021-07-07"
+channel = "nightly-2021-08-05"
components = ["rust-src", "rustc-dev", "llvm-tools-preview"]
);
std::array::IntoIter::new(["rustc".to_string()])
.chain(env::args().skip(2))
- .chain(["--".to_string(), "-Cllvm-args=mode=jit".to_string()])
+ .chain([
+ "--".to_string(),
+ "-Zunstable-features".to_string(),
+ "-Cllvm-args=mode=jit".to_string(),
+ ])
.collect()
}
Some("lazy-jit") => {
);
std::array::IntoIter::new(["rustc".to_string()])
.chain(env::args().skip(2))
- .chain(["--".to_string(), "-Cllvm-args=mode=jit-lazy".to_string()])
+ .chain([
+ "--".to_string(),
+ "-Zunstable-features".to_string(),
+ "-Cllvm-args=mode=jit-lazy".to_string(),
+ ])
.collect()
}
_ => env::args().skip(1).collect(),
source scripts/config.sh
RUSTC="$(pwd)/build/bin/cg_clif"
popd
-PROFILE=$1 OUTPUT=$2 exec $RUSTC -Cllvm-args=mode=jit -Cprefer-dynamic $0
+PROFILE=$1 OUTPUT=$2 exec $RUSTC -Zunstable-options -Cllvm-args=mode=jit -Cprefer-dynamic $0
#*/
//! This program filters away uninteresting samples and trims uninteresting frames for stackcollapse
[dependencies]
core = { path = "../core" }
-compiler_builtins = { version = "0.1.40", features = ['rustc-dep-of-std'] }
-+compiler_builtins = { version = "0.1.45", features = ['rustc-dep-of-std', 'no-asm'] }
++compiler_builtins = { version = "0.1.46", features = ['rustc-dep-of-std', 'no-asm'] }
[dev-dependencies]
rand = "0.7"
rm src/test/ui/allocator/no_std-alloc-error-handler-default.rs # missing rust_oom definition
rm src/test/ui/cfg/cfg-panic.rs
-rm src/test/ui/default-alloc-error-hook.rs
rm -r src/test/ui/hygiene/
rm -r src/test/ui/polymorphization/ # polymorphization not yet supported
if [[ "$JIT_SUPPORTED" = "1" ]]; then
echo "[JIT] mini_core_hello_world"
- CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Cllvm-args=mode=jit -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
+ CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Zunstable-options -Cllvm-args=mode=jit -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
echo "[JIT-lazy] mini_core_hello_world"
- CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
+ CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Zunstable-options -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
else
echo "[JIT] mini_core_hello_world (skipped)"
fi
if [[ "$JIT_SUPPORTED" = "1" ]]; then
echo "[JIT] std_example"
- $MY_RUSTC -Cllvm-args=mode=jit -Cprefer-dynamic example/std_example.rs --target "$HOST_TRIPLE"
+ $MY_RUSTC -Zunstable-options -Cllvm-args=mode=jit -Cprefer-dynamic example/std_example.rs --target "$HOST_TRIPLE"
echo "[JIT-lazy] std_example"
- $MY_RUSTC -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/std_example.rs --target "$HOST_TRIPLE"
+ $MY_RUSTC -Zunstable-options -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/std_example.rs --target "$HOST_TRIPLE"
else
echo "[JIT] std_example (skipped)"
fi
../build/cargo build --tests --target $TARGET_TRIPLE
fi
popd
+
+ pushd stdsimd
+ echo "[TEST] rust-lang/stdsimd"
+ ../build/cargo clean
+ ../build/cargo build --all-targets --target $TARGET_TRIPLE
+ if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
+ ../build/cargo test -q
+ fi
+ popd
}
case "$1" in
use rustc_target::abi::call::{Conv, FnAbi};
use rustc_target::spec::abi::Abi;
-use cranelift_codegen::ir::AbiParam;
-use smallvec::smallvec;
+use cranelift_codegen::ir::{AbiParam, SigRef};
use self::pass_mode::*;
use crate::prelude::*;
-pub(crate) use self::returning::{can_return_to_ssa_var, codegen_return};
+pub(crate) use self::returning::codegen_return;
fn clif_sig_from_fn_abi<'tcx>(
tcx: TyCtxt<'tcx>,
// not mutated by the current function, this is necessary to support unsized arguments.
if let ArgKind::Normal(Some(val)) = arg_kind {
if let Some((addr, meta)) = val.try_to_ptr() {
- let local_decl = &fx.mir.local_decls[local];
- // v this ! is important
- let internally_mutable = !val
- .layout()
- .ty
- .is_freeze(fx.tcx.at(local_decl.source_info.span), ParamEnv::reveal_all());
- if local_decl.mutability == mir::Mutability::Not && !internally_mutable {
- // We wont mutate this argument, so it is fine to borrow the backing storage
- // of this argument, to prevent a copy.
-
- let place = if let Some(meta) = meta {
- CPlace::for_ptr_with_extra(addr, meta, val.layout())
- } else {
- CPlace::for_ptr(addr, val.layout())
- };
-
- self::comments::add_local_place_comments(fx, place, local);
-
- assert_eq!(fx.local_map.push(place), local);
- continue;
- }
+ // Ownership of the value at the backing storage for an argument is passed to the
+ // callee per the ABI, so it is fine to borrow the backing storage of this argument
+ // to prevent a copy.
+
+ let place = if let Some(meta) = meta {
+ CPlace::for_ptr_with_extra(addr, meta, val.layout())
+ } else {
+ CPlace::for_ptr(addr, val.layout())
+ };
+
+ self::comments::add_local_place_comments(fx, place, local);
+
+ assert_eq!(fx.local_map.push(place), local);
+ continue;
}
}
fx.bcx.ins().jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
}
+struct CallArgument<'tcx> {
+ value: CValue<'tcx>,
+ is_owned: bool,
+}
+
+// FIXME avoid intermediate `CValue` before calling `adjust_arg_for_abi`
+fn codegen_call_argument_operand<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ operand: &Operand<'tcx>,
+) -> CallArgument<'tcx> {
+ CallArgument {
+ value: codegen_operand(fx, operand),
+ is_owned: matches!(operand, Operand::Move(_)),
+ }
+}
+
pub(crate) fn codegen_terminator_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
span: Span,
}
// Unpack arguments tuple for closures
- let args = if fn_sig.abi == Abi::RustCall {
+ let mut args = if fn_sig.abi == Abi::RustCall {
assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
- let self_arg = codegen_operand(fx, &args[0]);
- let pack_arg = codegen_operand(fx, &args[1]);
+ let self_arg = codegen_call_argument_operand(fx, &args[0]);
+ let pack_arg = codegen_call_argument_operand(fx, &args[1]);
- let tupled_arguments = match pack_arg.layout().ty.kind() {
+ let tupled_arguments = match pack_arg.value.layout().ty.kind() {
ty::Tuple(ref tupled_arguments) => tupled_arguments,
_ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
};
let mut args = Vec::with_capacity(1 + tupled_arguments.len());
args.push(self_arg);
for i in 0..tupled_arguments.len() {
- args.push(pack_arg.value_field(fx, mir::Field::new(i)));
+ args.push(CallArgument {
+ value: pack_arg.value.value_field(fx, mir::Field::new(i)),
+ is_owned: pack_arg.is_owned,
+ });
}
args
} else {
- args.iter().map(|arg| codegen_operand(fx, arg)).collect::<Vec<_>>()
+ args.iter().map(|arg| codegen_call_argument_operand(fx, arg)).collect::<Vec<_>>()
};
- // | indirect call target
- // | | the first argument to be passed
- // v v
- let (func_ref, first_arg) = match instance {
+ // Pass the caller location for `#[track_caller]`.
+ if instance.map(|inst| inst.def.requires_caller_location(fx.tcx)).unwrap_or(false) {
+ let caller_location = fx.get_caller_location(span);
+ args.push(CallArgument { value: caller_location, is_owned: false });
+ }
+
+ let args = args;
+ assert_eq!(fn_abi.args.len(), args.len());
+
+ enum CallTarget {
+ Direct(FuncRef),
+ Indirect(SigRef, Value),
+ }
+
+ let (func_ref, first_arg_override) = match instance {
// Trait object call
Some(Instance { def: InstanceDef::Virtual(_, idx), .. }) => {
if fx.clif_comments.enabled() {
let nop_inst = fx.bcx.ins().nop();
fx.add_comment(
nop_inst,
- format!("virtual call; self arg pass mode: {:?}", &fn_abi.args[0],),
+ format!("virtual call; self arg pass mode: {:?}", &fn_abi.args[0]),
);
}
- let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0], idx);
- (Some(method), smallvec![ptr])
+
+ let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0].value, idx);
+ let sig = clif_sig_from_fn_abi(fx.tcx, fx.triple(), &fn_abi);
+ let sig = fx.bcx.import_signature(sig);
+
+ (CallTarget::Indirect(sig, method), Some(ptr))
}
// Normal call
- Some(_) => (
- None,
- args.get(0)
- .map(|arg| adjust_arg_for_abi(fx, *arg, &fn_abi.args[0]))
- .unwrap_or(smallvec![]),
- ),
+ Some(instance) => {
+ let func_ref = fx.get_function_ref(instance);
+ (CallTarget::Direct(func_ref), None)
+ }
// Indirect call
None => {
let nop_inst = fx.bcx.ins().nop();
fx.add_comment(nop_inst, "indirect call");
}
+
let func = codegen_operand(fx, func).load_scalar(fx);
- (
- Some(func),
- args.get(0)
- .map(|arg| adjust_arg_for_abi(fx, *arg, &fn_abi.args[0]))
- .unwrap_or(smallvec![]),
- )
+ let sig = clif_sig_from_fn_abi(fx.tcx, fx.triple(), &fn_abi);
+ let sig = fx.bcx.import_signature(sig);
+
+ (CallTarget::Indirect(sig, func), None)
}
};
let ret_place = destination.map(|(place, _)| place);
- let (call_inst, call_args) = self::returning::codegen_with_call_return_arg(
- fx,
- &fn_abi.ret,
- ret_place,
- |fx, return_ptr| {
- let regular_args_count = args.len();
- let mut call_args: Vec<Value> = return_ptr
- .into_iter()
- .chain(first_arg.into_iter())
- .chain(
- args.into_iter()
- .enumerate()
- .skip(1)
- .map(|(i, arg)| adjust_arg_for_abi(fx, arg, &fn_abi.args[i]).into_iter())
- .flatten(),
- )
- .collect::<Vec<_>>();
-
- if instance.map(|inst| inst.def.requires_caller_location(fx.tcx)).unwrap_or(false) {
- // Pass the caller location for `#[track_caller]`.
- let caller_location = fx.get_caller_location(span);
- call_args.extend(
- adjust_arg_for_abi(fx, caller_location, &fn_abi.args[regular_args_count])
- .into_iter(),
- );
- assert_eq!(fn_abi.args.len(), regular_args_count + 1);
- } else {
- assert_eq!(fn_abi.args.len(), regular_args_count);
+ self::returning::codegen_with_call_return_arg(fx, &fn_abi.ret, ret_place, |fx, return_ptr| {
+ let call_args = return_ptr
+ .into_iter()
+ .chain(first_arg_override.into_iter())
+ .chain(
+ args.into_iter()
+ .enumerate()
+ .skip(if first_arg_override.is_some() { 1 } else { 0 })
+ .map(|(i, arg)| {
+ adjust_arg_for_abi(fx, arg.value, &fn_abi.args[i], arg.is_owned).into_iter()
+ })
+ .flatten(),
+ )
+ .collect::<Vec<Value>>();
+
+ let call_inst = match func_ref {
+ CallTarget::Direct(func_ref) => fx.bcx.ins().call(func_ref, &call_args),
+ CallTarget::Indirect(sig, func_ptr) => {
+ fx.bcx.ins().call_indirect(sig, func_ptr, &call_args)
}
+ };
- let call_inst = if let Some(func_ref) = func_ref {
- let sig = clif_sig_from_fn_abi(fx.tcx, fx.triple(), &fn_abi);
- let sig = fx.bcx.import_signature(sig);
- fx.bcx.ins().call_indirect(sig, func_ref, &call_args)
- } else {
- let func_ref =
- fx.get_function_ref(instance.expect("non-indirect call on non-FnDef type"));
- fx.bcx.ins().call(func_ref, &call_args)
- };
-
- (call_inst, call_args)
- },
- );
-
- // FIXME find a cleaner way to support varargs
- if fn_sig.c_variadic {
- if !matches!(fn_sig.abi, Abi::C { .. }) {
- fx.tcx.sess.span_fatal(span, &format!("Variadic call for non-C abi {:?}", fn_sig.abi));
+ // FIXME find a cleaner way to support varargs
+ if fn_sig.c_variadic {
+ if !matches!(fn_sig.abi, Abi::C { .. }) {
+ fx.tcx
+ .sess
+ .span_fatal(span, &format!("Variadic call for non-C abi {:?}", fn_sig.abi));
+ }
+ let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
+ let abi_params = call_args
+ .into_iter()
+ .map(|arg| {
+ let ty = fx.bcx.func.dfg.value_type(arg);
+ if !ty.is_int() {
+ // FIXME set %al to upperbound on float args once floats are supported
+ fx.tcx
+ .sess
+ .span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
+ }
+ AbiParam::new(ty)
+ })
+ .collect::<Vec<AbiParam>>();
+ fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
}
- let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
- let abi_params = call_args
- .into_iter()
- .map(|arg| {
- let ty = fx.bcx.func.dfg.value_type(arg);
- if !ty.is_int() {
- // FIXME set %al to upperbound on float args once floats are supported
- fx.tcx.sess.span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
- }
- AbiParam::new(ty)
- })
- .collect::<Vec<AbiParam>>();
- fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
- }
+
+ call_inst
+ });
if let Some((_, dest)) = destination {
let ret_block = fx.get_block(dest);
TypeAndMut { ty, mutbl: crate::rustc_hir::Mutability::Mut },
)),
);
- let arg_value = adjust_arg_for_abi(fx, arg_value, &fn_abi.args[0]);
+ let arg_value = adjust_arg_for_abi(fx, arg_value, &fn_abi.args[0], true);
let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>();
// Pass the caller location for `#[track_caller]`.
let caller_location = fx.get_caller_location(span);
call_args.extend(
- adjust_arg_for_abi(fx, caller_location, &fn_abi.args[1]).into_iter(),
+ adjust_arg_for_abi(fx, caller_location, &fn_abi.args[1], false).into_iter(),
);
}
fx: &mut FunctionCx<'_, '_, 'tcx>,
arg: CValue<'tcx>,
arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ is_owned: bool,
) -> SmallVec<[Value; 2]> {
assert_assignable(fx, arg.layout().ty, arg_abi.layout.ty);
match arg_abi.mode {
smallvec![a, b]
}
PassMode::Cast(cast) => to_casted_value(fx, arg, cast),
- PassMode::Indirect { .. } => match arg.force_stack(fx) {
- (ptr, None) => smallvec![ptr.get_addr(fx)],
- (ptr, Some(meta)) => smallvec![ptr.get_addr(fx), meta],
- },
+ PassMode::Indirect { .. } => {
+ if is_owned {
+ match arg.force_stack(fx) {
+ (ptr, None) => smallvec![ptr.get_addr(fx)],
+ (ptr, Some(meta)) => smallvec![ptr.get_addr(fx), meta],
+ }
+ } else {
+ // Ownership of the value at the backing storage for an argument is passed to the
+ // callee per the ABI, so we must make a copy of the argument unless the argument
+ // local is moved.
+ let place = CPlace::new_stack_slot(fx, arg.layout());
+ place.write_cvalue(fx, arg);
+ smallvec![place.to_ptr().get_addr(fx)]
+ }
+ }
}
}
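As an aside, a minimal standalone sketch (illustrative names, not cg_clif code) of why the `is_owned` distinction above matters: with `PassMode::Indirect` the callee receives a pointer and owns the pointed-to storage, so passing the address of a merely copied local would let the callee clobber the caller's value.

```rust
// Sketch only: a large array is passed indirectly at the ABI level.
fn callee(mut big: [u64; 32]) {
    big[0] = 42; // the callee owns its argument's backing storage and may write to it
}

fn main() {
    let big = [0u64; 32];
    // `big` is Copy, so this corresponds to a copied (not moved) MIR operand:
    // the backend must materialize a fresh stack slot for the call.
    callee(big);
    // The caller's copy must be unaffected by the callee's write.
    assert_eq!(big[0], 0);
}
```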
use crate::prelude::*;
-use rustc_middle::ty::layout::FnAbiExt;
-use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use rustc_target::abi::call::{ArgAbi, PassMode};
use smallvec::{smallvec, SmallVec};
-/// Can the given type be returned into an ssa var or does it need to be returned on the stack.
-pub(crate) fn can_return_to_ssa_var<'tcx>(
- fx: &FunctionCx<'_, '_, 'tcx>,
- func: &mir::Operand<'tcx>,
- args: &[mir::Operand<'tcx>],
-) -> bool {
- let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
- let fn_sig =
- fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
-
- // Handle special calls like instrinsics and empty drop glue.
- let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
- let instance = ty::Instance::resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
- .unwrap()
- .unwrap()
- .polymorphize(fx.tcx);
-
- match instance.def {
- InstanceDef::Intrinsic(_) | InstanceDef::DropGlue(_, _) => {
- return true;
- }
- _ => Some(instance),
- }
- } else {
- None
- };
-
- let extra_args = &args[fn_sig.inputs().len()..];
- let extra_args = extra_args
- .iter()
- .map(|op_arg| fx.monomorphize(op_arg.ty(fx.mir, fx.tcx)))
- .collect::<Vec<_>>();
- let fn_abi = if let Some(instance) = instance {
- FnAbi::of_instance(&RevealAllLayoutCx(fx.tcx), instance, &extra_args)
- } else {
- FnAbi::of_fn_ptr(&RevealAllLayoutCx(fx.tcx), fn_ty.fn_sig(fx.tcx), &extra_args)
- };
- match fn_abi.ret.mode {
- PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) => true,
- // FIXME Make it possible to return Cast and Indirect to an ssa var.
- PassMode::Cast(_) | PassMode::Indirect { .. } => false,
- }
-}
-
/// Return a place where the return value of the current function can be written to. If necessary
/// this adds an extra parameter pointing to where the return value needs to be stored.
pub(super) fn codegen_return_param<'tcx>(
block_params_iter: &mut impl Iterator<Item = Value>,
) -> CPlace<'tcx> {
let (ret_place, ret_param): (_, SmallVec<[_; 2]>) = match fx.fn_abi.as_ref().unwrap().ret.mode {
- PassMode::Ignore => (CPlace::no_place(fx.fn_abi.as_ref().unwrap().ret.layout), smallvec![]),
- PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => {
+ PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => {
let is_ssa = ssa_analyzed[RETURN_PLACE] == crate::analyze::SsaKind::Ssa;
(
super::make_local_place(
}
PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
let ret_param = block_params_iter.next().unwrap();
- assert_eq!(fx.bcx.func.dfg.value_type(ret_param), pointer_ty(fx.tcx));
+ assert_eq!(fx.bcx.func.dfg.value_type(ret_param), fx.pointer_type);
(
CPlace::for_ptr(Pointer::new(ret_param), fx.fn_abi.as_ref().unwrap().ret.layout),
smallvec![ret_param],
/// Invokes the closure with if necessary a value representing the return pointer. When the closure
/// returns the call return value(s) if any are written to the correct place.
-pub(super) fn codegen_with_call_return_arg<'tcx, T>(
+pub(super) fn codegen_with_call_return_arg<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
ret_arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
ret_place: Option<CPlace<'tcx>>,
- f: impl FnOnce(&mut FunctionCx<'_, '_, 'tcx>, Option<Value>) -> (Inst, T),
-) -> (Inst, T) {
- let return_ptr = match ret_arg_abi.mode {
- PassMode::Ignore => None,
+ f: impl FnOnce(&mut FunctionCx<'_, '_, 'tcx>, Option<Value>) -> Inst,
+) {
+ let (ret_temp_place, return_ptr) = match ret_arg_abi.mode {
+ PassMode::Ignore => (None, None),
PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => match ret_place {
- Some(ret_place) => Some(ret_place.to_ptr().get_addr(fx)),
- None => Some(fx.bcx.ins().iconst(fx.pointer_type, 43)), // FIXME allocate temp stack slot
+ Some(ret_place) if matches!(ret_place.inner(), CPlaceInner::Addr(_, None)) => {
+ // This is an optimization to prevent unnecessary copies of the return value when
+ // the return place is already a memory place as opposed to a register.
+ // This match arm can be safely removed.
+ (None, Some(ret_place.to_ptr().get_addr(fx)))
+ }
+ _ => {
+ let place = CPlace::new_stack_slot(fx, ret_arg_abi.layout);
+ (Some(place), Some(place.to_ptr().get_addr(fx)))
+ }
},
PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
unreachable!("unsized return value")
}
- PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => None,
+ PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => (None, None),
};
- let (call_inst, meta) = f(fx, return_ptr);
+ let call_inst = f(fx, return_ptr);
match ret_arg_abi.mode {
PassMode::Ignore => {}
ret_place.write_cvalue(fx, result);
}
}
- PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {}
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ if let (Some(ret_place), Some(ret_temp_place)) = (ret_place, ret_temp_place) {
+ // Both ret_place and ret_temp_place must be Some. If ret_place is None, this is
+ // a non-returning call. If ret_temp_place is None, it is not necessary to copy the
+ // return value.
+ let ret_temp_value = ret_temp_place.to_cvalue(fx);
+ ret_place.write_cvalue(fx, ret_temp_value);
+ }
+ }
PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
unreachable!("unsized return value")
}
}
-
- (call_inst, meta)
}
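The temp-slot handling above can be modeled in isolation. A hedged sketch (illustrative types, not the real `CPlace` API): when the destination is already an addressable memory place its address is passed straight to the callee, otherwise the call returns into a temporary slot that is copied back afterwards.

```rust
// Sketch only: models the two indirect-return paths.
enum RetDest<'a> {
    Memory(&'a mut [u8; 16]), // like CPlaceInner::Addr: pass the address directly
    Register(&'a mut u128),   // register-like place: needs a temp slot + copy-back
}

fn call_with_sret(dest: RetDest<'_>, callee: impl Fn(&mut [u8; 16])) {
    match dest {
        RetDest::Memory(buf) => callee(buf), // no extra copy
        RetDest::Register(val) => {
            let mut temp = [0u8; 16]; // temporary stack slot
            callee(&mut temp);
            *val = u128::from_ne_bytes(temp); // copy back after the call
        }
    }
}

fn main() {
    let mut mem = [0u8; 16];
    call_with_sret(RetDest::Memory(&mut mem), |b| b[0] = 7);
    assert_eq!(mem[0], 7);

    let mut reg = 0u128;
    call_with_sret(RetDest::Register(&mut reg), |b| *b = 1u128.to_ne_bytes());
    assert_eq!(reg, 1);
}
```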
/// Codegen a return instruction with the right return value(s) if any.
use cranelift_codegen::binemit::{NullStackMapSink, NullTrapSink};
use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
-use rustc_span::symbol::sym;
/// Returns whether an allocator shim was created
pub(crate) fn codegen(
if any_dynamic_crate {
false
} else if let Some(kind) = tcx.allocator_kind(()) {
- codegen_inner(module, unwind_context, kind);
+ codegen_inner(module, unwind_context, kind, tcx.lang_items().oom().is_some());
true
} else {
false
module: &mut impl Module,
unwind_context: &mut UnwindContext,
kind: AllocatorKind,
+ has_alloc_error_handler: bool,
) {
let usize_ty = module.target_config().pointer_type();
let caller_name = format!("__rust_{}", method.name);
let callee_name = kind.fn_name(method.name);
- //eprintln!("Codegen allocator shim {} -> {} ({:?} -> {:?})", caller_name, callee_name, sig.params, sig.returns);
let func_id = module.declare_function(&caller_name, Linkage::Export, &sig).unwrap();
returns: vec![],
};
- let callee_name = kind.fn_name(sym::oom);
- //eprintln!("Codegen allocator shim {} -> {} ({:?} -> {:?})", caller_name, callee_name, sig.params, sig.returns);
+ let callee_name = if has_alloc_error_handler { "__rg_oom" } else { "__rdl_oom" };
let func_id =
module.declare_function("__rust_alloc_error_handler", Linkage::Export, &sig).unwrap();
- let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();
+ let callee_func_id = module.declare_function(callee_name, Linkage::Import, &sig).unwrap();
let mut ctx = Context::new();
ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig);
_ => {}
}
}
-
- match &bb.terminator().kind {
- TerminatorKind::Call { destination, func, args, .. } => {
- if let Some((dest_place, _dest_bb)) = destination {
- if !crate::abi::can_return_to_ssa_var(fx, func, args) {
- not_ssa(&mut flag_map, dest_place.local)
- }
- }
- }
- _ => {}
- }
}
flag_map
crate::optimize::peephole::maybe_unwrap_bool_not(&mut fx.bcx, discr);
let test_zero = if is_inverted { !test_zero } else { test_zero };
let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
- let discr =
- crate::optimize::peephole::make_branchable_value(&mut fx.bcx, discr);
if let Some(taken) = crate::optimize::peephole::maybe_known_branch_taken(
&fx.bcx, discr, test_zero,
) {
(_, _) if from == to => val,
// extend
- (_, types::I128) => {
- let lo = if from == types::I64 {
- val
- } else if signed {
- fx.bcx.ins().sextend(types::I64, val)
- } else {
- fx.bcx.ins().uextend(types::I64, val)
- };
- let hi = if signed {
- fx.bcx.ins().sshr_imm(lo, 63)
- } else {
- fx.bcx.ins().iconst(types::I64, 0)
- };
- fx.bcx.ins().iconcat(lo, hi)
- }
(_, _) if to.wider_or_equal(from) => {
if signed {
fx.bcx.ins().sextend(to, val)
}
// reduce
- (types::I128, _) => {
- let (lsb, _msb) = fx.bcx.ins().isplit(val);
- if to == types::I64 { lsb } else { fx.bcx.ins().ireduce(to, lsb) }
- }
(_, _) => fx.bcx.ins().ireduce(to, val),
}
}
return None;
}
- let lhs_val = lhs.load_scalar(fx);
- let rhs_val = rhs.load_scalar(fx);
-
let is_signed = type_sign(lhs.layout().ty);
match bin_op {
None
}
BinOp::Add | BinOp::Sub if !checked => None,
- BinOp::Mul if !checked => {
- let val_ty = if is_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
- if fx.tcx.sess.target.is_like_windows {
- let ret_place = CPlace::new_stack_slot(fx, lhs.layout());
- let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
- let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
- assert!(lhs_extra.is_none());
- assert!(rhs_extra.is_none());
- let args =
- [ret_place.to_ptr().get_addr(fx), lhs_ptr.get_addr(fx), rhs_ptr.get_addr(fx)];
- fx.lib_call(
- "__multi3",
+ BinOp::Mul if !checked || is_signed => {
+ if !checked {
+ let val_ty = if is_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
+ if fx.tcx.sess.target.is_like_windows {
+ let ret_place = CPlace::new_stack_slot(fx, lhs.layout());
+ let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
+ let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
+ assert!(lhs_extra.is_none());
+ assert!(rhs_extra.is_none());
+ let args = [
+ ret_place.to_ptr().get_addr(fx),
+ lhs_ptr.get_addr(fx),
+ rhs_ptr.get_addr(fx),
+ ];
+ fx.lib_call(
+ "__multi3",
+ vec![
+ AbiParam::special(fx.pointer_type, ArgumentPurpose::StructReturn),
+ AbiParam::new(fx.pointer_type),
+ AbiParam::new(fx.pointer_type),
+ ],
+ vec![],
+ &args,
+ );
+ Some(ret_place.to_cvalue(fx))
+ } else {
+ Some(fx.easy_call("__multi3", &[lhs, rhs], val_ty))
+ }
+ } else {
+ let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
+ let oflow = CPlace::new_stack_slot(fx, fx.layout_of(fx.tcx.types.i32));
+ let lhs = lhs.load_scalar(fx);
+ let rhs = rhs.load_scalar(fx);
+ let oflow_ptr = oflow.to_ptr().get_addr(fx);
+ let res = fx.lib_call(
+ "__muloti4",
vec![
- AbiParam::special(pointer_ty(fx.tcx), ArgumentPurpose::StructReturn),
- AbiParam::new(pointer_ty(fx.tcx)),
- AbiParam::new(pointer_ty(fx.tcx)),
+ AbiParam::new(types::I128),
+ AbiParam::new(types::I128),
+ AbiParam::new(fx.pointer_type),
],
- vec![],
- &args,
- );
- Some(ret_place.to_cvalue(fx))
- } else {
- Some(fx.easy_call("__multi3", &[lhs, rhs], val_ty))
+ vec![AbiParam::new(types::I128)],
+ &[lhs, rhs, oflow_ptr],
+ )[0];
+ let oflow = oflow.to_cvalue(fx).load_scalar(fx);
+ let oflow = fx.bcx.ins().ireduce(types::I8, oflow);
+ Some(CValue::by_val_pair(res, oflow, fx.layout_of(out_ty)))
}
}
BinOp::Add | BinOp::Sub | BinOp::Mul => {
assert!(rhs_extra.is_none());
(
vec![
- AbiParam::special(pointer_ty(fx.tcx), ArgumentPurpose::StructReturn),
- AbiParam::new(pointer_ty(fx.tcx)),
- AbiParam::new(pointer_ty(fx.tcx)),
+ AbiParam::special(fx.pointer_type, ArgumentPurpose::StructReturn),
+ AbiParam::new(fx.pointer_type),
+ AbiParam::new(fx.pointer_type),
],
[out_place.to_ptr().get_addr(fx), lhs_ptr.get_addr(fx), rhs_ptr.get_addr(fx)],
)
} else {
(
vec![
- AbiParam::special(pointer_ty(fx.tcx), ArgumentPurpose::StructReturn),
+ AbiParam::special(fx.pointer_type, ArgumentPurpose::StructReturn),
AbiParam::new(types::I128),
AbiParam::new(types::I128),
],
(BinOp::Sub, false) => "__rust_u128_subo",
(BinOp::Sub, true) => "__rust_i128_subo",
(BinOp::Mul, false) => "__rust_u128_mulo",
- (BinOp::Mul, true) => "__rust_i128_mulo",
_ => unreachable!(),
};
fx.lib_call(name, param_types, vec![], &args);
let args = [lhs_ptr.get_addr(fx), rhs_ptr.get_addr(fx)];
let ret = fx.lib_call(
name,
- vec![AbiParam::new(pointer_ty(fx.tcx)), AbiParam::new(pointer_ty(fx.tcx))],
+ vec![AbiParam::new(fx.pointer_type), AbiParam::new(fx.pointer_type)],
vec![AbiParam::new(types::I64X2)],
&args,
)[0];
assert!(!checked);
None
}
- BinOp::Shl | BinOp::Shr => {
- let is_overflow = if checked {
- // rhs >= 128
-
- // FIXME support non 128bit rhs
- /*let (rhs_lsb, rhs_msb) = fx.bcx.ins().isplit(rhs_val);
- let rhs_msb_gt_0 = fx.bcx.ins().icmp_imm(IntCC::NotEqual, rhs_msb, 0);
- let rhs_lsb_ge_128 = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThan, rhs_lsb, 127);
- let is_overflow = fx.bcx.ins().bor(rhs_msb_gt_0, rhs_lsb_ge_128);*/
- let is_overflow = fx.bcx.ins().bconst(types::B1, false);
-
- Some(fx.bcx.ins().bint(types::I8, is_overflow))
- } else {
- None
- };
-
- let truncated_rhs = clif_intcast(fx, rhs_val, types::I32, false);
- let val = match bin_op {
- BinOp::Shl => fx.bcx.ins().ishl(lhs_val, truncated_rhs),
- BinOp::Shr => {
- if is_signed {
- fx.bcx.ins().sshr(lhs_val, truncated_rhs)
- } else {
- fx.bcx.ins().ushr(lhs_val, truncated_rhs)
- }
- }
- _ => unreachable!(),
- };
- if let Some(is_overflow) = is_overflow {
- let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
- Some(CValue::by_val_pair(val, is_overflow, fx.layout_of(out_ty)))
- } else {
- Some(CValue::by_val(val, lhs.layout()))
- }
- }
+ BinOp::Shl | BinOp::Shr => None,
}
}
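For reference, the behaviour of the `__muloti4` builtin called above can be modeled in plain Rust (the model function name is made up; only compiler-builtins' documented contract is assumed): it returns the wrapping 128-bit product and reports overflow through the out-pointer.

```rust
// Sketch of __muloti4's contract: wrapping product, overflow flag via pointer.
fn muloti4_model(a: i128, b: i128, overflow: &mut i32) -> i128 {
    let (res, oflow) = a.overflowing_mul(b);
    *overflow = oflow as i32;
    res
}

fn main() {
    let mut oflow = 0;
    let res = muloti4_model(i128::MAX, 2, &mut oflow);
    assert_eq!(oflow, 1);
    assert_eq!(res, i128::MAX.wrapping_mul(2));
}
```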
let val = match eh_pe.application() {
gimli::DW_EH_PE_absptr => val,
gimli::DW_EH_PE_pcrel => {
- // TODO: better handling of sign
+ // FIXME better handling of sign
let offset = self.len() as u64;
offset.wrapping_sub(val)
}
pub(crate) fn new(tcx: TyCtxt<'tcx>, isa: &dyn TargetIsa) -> Self {
let encoding = Encoding {
format: Format::Dwarf32,
- // TODO: this should be configurable
+ // FIXME this should be configurable
// macOS doesn't seem to support DWARF > 3
// 5 version is required for md5 file hash
version: if tcx.sess.target.is_like_osx {
assert_eq!(lane_count, ret_lane_count);
for lane_idx in 0..lane_count {
- let lane_idx = mir::Field::new(lane_idx.try_into().unwrap());
- let lane = val.value_field(fx, lane_idx).load_scalar(fx);
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
let res_lane = f(fx, lane_layout, ret_lane_layout, lane);
- ret.place_field(fx, lane_idx).write_cvalue(fx, res_lane);
+ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
}
}
let ret_lane_layout = fx.layout_of(ret_lane_ty);
assert_eq!(lane_count, ret_lane_count);
- for lane in 0..lane_count {
- let lane = mir::Field::new(lane.try_into().unwrap());
- let x_lane = x.value_field(fx, lane).load_scalar(fx);
- let y_lane = y.value_field(fx, lane).load_scalar(fx);
+ for lane_idx in 0..lane_count {
+ let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
+ let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);
let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);
- ret.place_field(fx, lane).write_cvalue(fx, res_lane);
+ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
}
}
fn simd_reduce<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
val: CValue<'tcx>,
+ acc: Option<Value>,
ret: CPlace<'tcx>,
f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, TyAndLayout<'tcx>, Value, Value) -> Value,
) {
let lane_layout = fx.layout_of(lane_ty);
assert_eq!(lane_layout, ret.layout());
- let mut res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
- for lane_idx in 1..lane_count {
- let lane =
- val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
+ let (mut res_val, start_lane) =
+ if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
+ for lane_idx in start_lane..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
res_val = f(fx, lane_layout, res_val, lane);
}
let res = CValue::by_val(res_val, lane_layout);
ret.write_cvalue(fx, res);
}
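The accumulator handling added to `simd_reduce` above can be read in isolation as a scalar model (illustrative name, not the cg_clif helper): with an initial accumulator every lane is folded in, otherwise lane 0 seeds the fold and iteration starts at lane 1.

```rust
// Scalar model of the reduction order used by simd_reduce above.
fn simd_reduce_model<T: Copy>(lanes: &[T], acc: Option<T>, f: impl Fn(T, T) -> T) -> T {
    let (mut res, start) = match acc {
        Some(acc) => (acc, 0), // seed with the accumulator, consume all lanes
        None => (lanes[0], 1), // seed with lane 0, start folding at lane 1
    };
    for &lane in &lanes[start..] {
        res = f(res, lane);
    }
    res
}

fn main() {
    let v = [1, 2, 3, 4];
    assert_eq!(simd_reduce_model(&v, None, |a, b| a + b), 10);
    assert_eq!(simd_reduce_model(&v, Some(100), |a, b| a + b), 110);
}
```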
+// FIXME move all uses to `simd_reduce`
fn simd_reduce_bool<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
val: CValue<'tcx>,
let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
assert!(ret.layout().ty.is_bool());
- let res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
+ let res_val = val.value_lane(fx, 0).load_scalar(fx);
let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
for lane_idx in 1..lane_count {
- let lane =
- val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
res_val = f(fx, res_val, lane);
}
+ let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
+ fx.bcx.ins().ireduce(types::I8, res_val)
+ } else {
+ res_val
+ };
let res = CValue::by_val(res_val, ret.layout());
ret.write_cvalue(fx, res);
}
if let Some(vector_ty) = vector_ty {
let x = $x.load_scalar($fx);
let y = $y.load_scalar($fx);
- let val = $fx.bcx.ins().icmp(IntCC::$cc, x, y);
+ let val = if vector_ty.lane_type().is_float() {
+ $fx.bcx.ins().fcmp(FloatCC::$cc_f, x, y)
+ } else {
+ $fx.bcx.ins().icmp(IntCC::$cc, x, y)
+ };
// HACK This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);
let (val, has_overflow) = checked_res.load_scalar_pair(fx);
let clif_ty = fx.clif_type(T).unwrap();
- // `select.i8` is not implemented by Cranelift.
- let has_overflow = fx.bcx.ins().uextend(types::I32, has_overflow);
-
let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
let val = match (intrinsic, signed) {
};
rotate_left, <T>(v x, v y) {
let layout = fx.layout_of(T);
- let y = if fx.bcx.func.dfg.value_type(y) == types::I128 {
- fx.bcx.ins().ireduce(types::I64, y)
- } else {
- y
- };
let res = fx.bcx.ins().rotl(x, y);
ret.write_cvalue(fx, CValue::by_val(res, layout));
};
rotate_right, <T>(v x, v y) {
let layout = fx.layout_of(T);
- let y = if fx.bcx.func.dfg.value_type(y) == types::I128 {
- fx.bcx.ins().ireduce(types::I64, y)
- } else {
- y
- };
let res = fx.bcx.ins().rotr(x, y);
ret.write_cvalue(fx, CValue::by_val(res, layout));
};
};
ctlz | ctlz_nonzero, <T> (v arg) {
// FIXME trap on `ctlz_nonzero` with zero arg.
- let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
- // FIXME verify this algorithm is correct
- let (lsb, msb) = fx.bcx.ins().isplit(arg);
- let lsb_lz = fx.bcx.ins().clz(lsb);
- let msb_lz = fx.bcx.ins().clz(msb);
- let msb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, msb, 0);
- let lsb_lz_plus_64 = fx.bcx.ins().iadd_imm(lsb_lz, 64);
- let res = fx.bcx.ins().select(msb_is_zero, lsb_lz_plus_64, msb_lz);
- fx.bcx.ins().uextend(types::I128, res)
- } else {
- fx.bcx.ins().clz(arg)
- };
+ let res = fx.bcx.ins().clz(arg);
let res = CValue::by_val(res, fx.layout_of(T));
ret.write_cvalue(fx, res);
};
cttz | cttz_nonzero, <T> (v arg) {
// FIXME trap on `cttz_nonzero` with zero arg.
- let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
- // FIXME verify this algorithm is correct
- let (lsb, msb) = fx.bcx.ins().isplit(arg);
- let lsb_tz = fx.bcx.ins().ctz(lsb);
- let msb_tz = fx.bcx.ins().ctz(msb);
- let lsb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, lsb, 0);
- let msb_tz_plus_64 = fx.bcx.ins().iadd_imm(msb_tz, 64);
- let res = fx.bcx.ins().select(lsb_is_zero, msb_tz_plus_64, lsb_tz);
- fx.bcx.ins().uextend(types::I128, res)
- } else {
- fx.bcx.ins().ctz(arg)
- };
+ let res = fx.bcx.ins().ctz(arg);
let res = CValue::by_val(res, fx.layout_of(T));
ret.write_cvalue(fx, res);
};
let old = CValue::by_val(old, layout);
ret.write_cvalue(fx, old);
};
-
- // FIXME https://github.com/bytecodealliance/wasmtime/issues/2647
_ if intrinsic.as_str().starts_with("atomic_nand"), (v ptr, c src) {
let layout = src.layout();
validate_atomic_type!(fx, intrinsic, span, layout.ty);
ret.write_cvalue(fx, old);
};
+ // In Rust floating point min and max don't propagate NaN. In Cranelift they do however.
+ // For this reason it is necessary to use `a.is_nan() ? b : (a >= b ? b : a)` for `minnumf*`
+ // and `a.is_nan() ? b : (a <= b ? b : a)` for `maxnumf*`. NaN checks are done by comparing
+ // a float against itself. Only in case of NaN is it not equal to itself.
minnumf32, (v a, v b) {
- let val = fx.bcx.ins().fmin(a, b);
+ let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+ let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
+ let temp = fx.bcx.ins().select(a_ge_b, b, a);
+ let val = fx.bcx.ins().select(a_is_nan, b, temp);
let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
ret.write_cvalue(fx, val);
};
minnumf64, (v a, v b) {
- let val = fx.bcx.ins().fmin(a, b);
+ let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+ let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
+ let temp = fx.bcx.ins().select(a_ge_b, b, a);
+ let val = fx.bcx.ins().select(a_is_nan, b, temp);
let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
ret.write_cvalue(fx, val);
};
maxnumf32, (v a, v b) {
- let val = fx.bcx.ins().fmax(a, b);
+ let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+ let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
+ let temp = fx.bcx.ins().select(a_le_b, b, a);
+ let val = fx.bcx.ins().select(a_is_nan, b, temp);
let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
ret.write_cvalue(fx, val);
};
maxnumf64, (v a, v b) {
- let val = fx.bcx.ins().fmax(a, b);
+ let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+ let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
+ let temp = fx.bcx.ins().select(a_le_b, b, a);
+ let val = fx.bcx.ins().select(a_is_nan, b, temp);
let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
ret.write_cvalue(fx, val);
};
}
let size = fx.layout_of(T).layout.size;
+ // FIXME add and use emit_small_memcmp
let is_eq_value =
if size == Size::ZERO {
// No bytes means they're trivially equal
} else {
// Just call `memcmp` (like slices do in core) when the
// size is too large or it's not a power-of-two.
- let ptr_ty = pointer_ty(fx.tcx);
let signed_bytes = i64::try_from(size.bytes()).unwrap();
- let bytes_val = fx.bcx.ins().iconst(ptr_ty, signed_bytes);
- let params = vec![AbiParam::new(ptr_ty); 3];
+ let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
+ let params = vec![AbiParam::new(fx.pointer_type); 3];
let returns = vec![AbiParam::new(types::I32)];
let args = &[lhs_ref, rhs_ref, bytes_val];
let cmp = fx.lib_call("memcmp", params, returns, args)[0];
for (out_idx, in_idx) in indexes.into_iter().enumerate() {
let in_lane = if u64::from(in_idx) < lane_count {
- x.value_field(fx, mir::Field::new(in_idx.into()))
+ x.value_lane(fx, in_idx.into())
} else {
- y.value_field(fx, mir::Field::new(usize::from(in_idx) - usize::try_from(lane_count).unwrap()))
+ y.value_lane(fx, u64::from(in_idx) - lane_count)
};
- let out_lane = ret.place_field(fx, mir::Field::new(out_idx));
+ let out_lane = ret.place_lane(fx, u64::try_from(out_idx).unwrap());
out_lane.write_cvalue(fx, in_lane);
}
};
fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
}
- let ret_lane = v.value_field(fx, mir::Field::new(idx.try_into().unwrap()));
+ let ret_lane = v.value_lane(fx, idx.try_into().unwrap());
ret.write_cvalue(fx, ret_lane);
};
+ simd_neg, (c a) {
+ validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+ simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
+ let ret_lane = match lane_layout.ty.kind() {
+ ty::Int(_) => fx.bcx.ins().ineg(lane),
+ ty::Float(_) => fx.bcx.ins().fneg(lane),
+ _ => unreachable!(),
+ };
+ CValue::by_val(ret_lane, ret_lane_layout)
+ });
+ };
+
+ simd_fabs, (c a) {
+ validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+ simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
+ let ret_lane = fx.bcx.ins().fabs(lane);
+ CValue::by_val(ret_lane, ret_lane_layout)
+ });
+ };
+
+ simd_fsqrt, (c a) {
+ validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+ simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
+ let ret_lane = fx.bcx.ins().sqrt(lane);
+ CValue::by_val(ret_lane, ret_lane_layout)
+ });
+ };
+
simd_add, (c x, c y) {
validate_simd_type!(fx, intrinsic, span, x.layout().ty);
simd_int_flt_binop!(fx, iadd|fadd(x, y) -> ret);
validate_simd_type!(fx, intrinsic, span, x.layout().ty);
simd_int_flt_binop!(fx, udiv|sdiv|fdiv(x, y) -> ret);
};
+ simd_rem, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
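+ // Integer lanes use urem/srem; Cranelift has no float remainder instruction, so float lanes call out to libm's fmodf/fmod.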
+ simd_pair_for_each_lane(fx, x, y, ret, |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
+ let res_lane = match lane_layout.ty.kind() {
+ ty::Uint(_) => fx.bcx.ins().urem(x_lane, y_lane),
+ ty::Int(_) => fx.bcx.ins().srem(x_lane, y_lane),
+ ty::Float(FloatTy::F32) => fx.lib_call(
+ "fmodf",
+ vec![AbiParam::new(types::F32), AbiParam::new(types::F32)],
+ vec![AbiParam::new(types::F32)],
+ &[x_lane, y_lane],
+ )[0],
+ ty::Float(FloatTy::F64) => fx.lib_call(
+ "fmod",
+ vec![AbiParam::new(types::F64), AbiParam::new(types::F64)],
+ vec![AbiParam::new(types::F64)],
+ &[x_lane, y_lane],
+ )[0],
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ CValue::by_val(res_lane, ret_lane_layout)
+ });
+ };
simd_shl, (c x, c y) {
validate_simd_type!(fx, intrinsic, span, x.layout().ty);
simd_int_binop!(fx, ishl(x, y) -> ret);
let ret_lane_layout = fx.layout_of(ret_lane_ty);
for lane in 0..lane_count {
- let lane = mir::Field::new(lane.try_into().unwrap());
- let a_lane = a.value_field(fx, lane).load_scalar(fx);
- let b_lane = b.value_field(fx, lane).load_scalar(fx);
- let c_lane = c.value_field(fx, lane).load_scalar(fx);
+ let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+ let b_lane = b.value_lane(fx, lane).load_scalar(fx);
+ let c_lane = c.value_lane(fx, lane).load_scalar(fx);
let mul_lane = fx.bcx.ins().fmul(a_lane, b_lane);
let res_lane = CValue::by_val(fx.bcx.ins().fadd(mul_lane, c_lane), ret_lane_layout);
- ret.place_field(fx, lane).write_cvalue(fx, res_lane);
+ ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
}
};
simd_flt_binop!(fx, fmax(x, y) -> ret);
};
- simd_reduce_add_ordered | simd_reduce_add_unordered, (c v) {
+ simd_round, (c a) {
+ validate_simd_type!(fx, intrinsic, span, a.layout().ty);
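+ // Rust's `round` rounds half-way cases away from zero, unlike Cranelift's `nearest` (ties to even), so call libm's roundf/round per lane.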
+ simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
+ let res_lane = match lane_layout.ty.kind() {
+ ty::Float(FloatTy::F32) => fx.lib_call(
+ "roundf",
+ vec![AbiParam::new(types::F32)],
+ vec![AbiParam::new(types::F32)],
+ &[lane],
+ )[0],
+ ty::Float(FloatTy::F64) => fx.lib_call(
+ "round",
+ vec![AbiParam::new(types::F64)],
+ vec![AbiParam::new(types::F64)],
+ &[lane],
+ )[0],
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ CValue::by_val(res_lane, ret_lane_layout)
+ });
+ };
+ simd_ceil, (c a) {
+ validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+ simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
+ let ret_lane = fx.bcx.ins().ceil(lane);
+ CValue::by_val(ret_lane, ret_lane_layout)
+ });
+ };
+ simd_floor, (c a) {
+ validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+ simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
+ let ret_lane = fx.bcx.ins().floor(lane);
+ CValue::by_val(ret_lane, ret_lane_layout)
+ });
+ };
+ simd_trunc, (c a) {
+ validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+ simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
+ let ret_lane = fx.bcx.ins().trunc(lane);
+ CValue::by_val(ret_lane, ret_lane_layout)
+ });
+ };
+
+ simd_reduce_add_ordered | simd_reduce_add_unordered, (c v, v acc) {
validate_simd_type!(fx, intrinsic, span, v.layout().ty);
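+ // `acc` is an explicit accumulator supplied by the caller to seed the reduction.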
- simd_reduce(fx, v, ret, |fx, lane_layout, a, b| {
+ simd_reduce(fx, v, Some(acc), ret, |fx, lane_layout, a, b| {
if lane_layout.ty.is_floating_point() {
fx.bcx.ins().fadd(a, b)
} else {
});
};
- simd_reduce_mul_ordered | simd_reduce_mul_unordered, (c v) {
+ simd_reduce_mul_ordered | simd_reduce_mul_unordered, (c v, v acc) {
validate_simd_type!(fx, intrinsic, span, v.layout().ty);
- simd_reduce(fx, v, ret, |fx, lane_layout, a, b| {
+ simd_reduce(fx, v, Some(acc), ret, |fx, lane_layout, a, b| {
if lane_layout.ty.is_floating_point() {
fx.bcx.ins().fmul(a, b)
} else {
simd_reduce_bool(fx, v, ret, |fx, a, b| fx.bcx.ins().bor(a, b));
};
- // simd_fabs
- // simd_saturating_add
+ simd_reduce_and, (c v) {
+ validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+ simd_reduce(fx, v, None, ret, |fx, _layout, a, b| fx.bcx.ins().band(a, b));
+ };
+
+ simd_reduce_or, (c v) {
+ validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+ simd_reduce(fx, v, None, ret, |fx, _layout, a, b| fx.bcx.ins().bor(a, b));
+ };
+
+ simd_reduce_xor, (c v) {
+ validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+ simd_reduce(fx, v, None, ret, |fx, _layout, a, b| fx.bcx.ins().bxor(a, b));
+ };
+
+ simd_reduce_min, (c v) {
+ // FIXME support floats
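+ // Fold across the lanes, keeping the smaller of the running value and each lane; signedness picks the comparison.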
+ validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+ simd_reduce(fx, v, None, ret, |fx, layout, a, b| {
+ let lt = fx.bcx.ins().icmp(if layout.ty.is_signed() {
+ IntCC::SignedLessThan
+ } else {
+ IntCC::UnsignedLessThan
+ }, a, b);
+ fx.bcx.ins().select(lt, a, b)
+ });
+ };
+
+ simd_reduce_max, (c v) {
+ // FIXME support floats
+ validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+ simd_reduce(fx, v, None, ret, |fx, layout, a, b| {
+ let gt = fx.bcx.ins().icmp(if layout.ty.is_signed() {
+ IntCC::SignedGreaterThan
+ } else {
+ IntCC::UnsignedGreaterThan
+ }, a, b);
+ fx.bcx.ins().select(gt, a, b)
+ });
+ };
+
+ simd_select, (c m, c a, c b) {
+ validate_simd_type!(fx, intrinsic, span, m.layout().ty);
+ validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+ assert_eq!(a.layout(), b.layout());
+
+ let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+
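+ // Each result lane comes from `b` when the mask lane is zero and from `a` otherwise.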
+ for lane in 0..lane_count {
+ let m_lane = m.value_lane(fx, lane).load_scalar(fx);
+ let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+ let b_lane = b.value_lane(fx, lane).load_scalar(fx);
+
+ let m_lane = fx.bcx.ins().icmp_imm(IntCC::Equal, m_lane, 0);
+ let res_lane = CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
+
+ ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
+ }
+ };
+
+ // simd_saturating_*
// simd_bitmask
- // simd_select
- // simd_rem
- // simd_neg
- // simd_trunc
- // simd_floor
+ // simd_scatter
+ // simd_gather
}
}
let config = if let Some(config) = self.config.clone() {
config
} else {
+ if !tcx.sess.unstable_options() && !tcx.sess.opts.cg.llvm_args.is_empty() {
+ tcx.sess.fatal("`-Z unstable-options` must be passed to allow configuring cg_clif");
+ }
BackendConfig::from_opts(&tcx.sess.opts.cg.llvm_args)
.unwrap_or_else(|err| tcx.sess.fatal(&err))
};
) -> Result<(), ErrorReported> {
use rustc_codegen_ssa::back::link::link_binary;
- link_binary::<crate::archive::ArArchiveBuilder<'_>>(
- sess,
- &codegen_results,
- outputs,
- )
+ link_binary::<crate::archive::ArArchiveBuilder<'_>>(sess, &codegen_results, outputs)
}
}
fn target_triple(sess: &Session) -> target_lexicon::Triple {
- sess.target.llvm_target.parse().unwrap()
+ match sess.target.llvm_target.parse() {
+ Ok(triple) => triple,
+ Err(err) => sess.fatal(&format!("target not recognized: {}", err)),
+ }
}
fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Box<dyn isa::TargetIsa + 'static> {
}
Some(value) => {
let mut builder =
- cranelift_codegen::isa::lookup_variant(target_triple, variant).unwrap();
+ cranelift_codegen::isa::lookup_variant(target_triple.clone(), variant)
+ .unwrap_or_else(|err| {
+ sess.fatal(&format!("can't compile for {}: {}", target_triple, err));
+ });
if let Err(_) = builder.enable(value) {
- sess.fatal("The specified target cpu isn't currently supported by Cranelift.");
+ sess.fatal("the specified target cpu isn't currently supported by Cranelift.");
}
builder
}
None => {
let mut builder =
- cranelift_codegen::isa::lookup_variant(target_triple.clone(), variant).unwrap();
+ cranelift_codegen::isa::lookup_variant(target_triple.clone(), variant)
+ .unwrap_or_else(|err| {
+ sess.fatal(&format!("can't compile for {}: {}", target_triple, err));
+ });
if target_triple.architecture == target_lexicon::Architecture::X86_64 {
// Don't use "haswell" as the default, as it implies `has_lzcnt`.
// macOS CI is still at Ivy Bridge EP, so `lzcnt` is interpreted as `bsr`.
let lhs = in_lhs.load_scalar(fx);
let rhs = in_rhs.load_scalar(fx);
- let (lhs, rhs) = if (bin_op == BinOp::Eq || bin_op == BinOp::Ne)
- && (in_lhs.layout().ty.kind() == fx.tcx.types.i8.kind()
- || in_lhs.layout().ty.kind() == fx.tcx.types.i16.kind())
- {
- // FIXME(CraneStation/cranelift#896) icmp_imm.i8/i16 with eq/ne for signed ints is implemented wrong.
- (
- fx.bcx.ins().sextend(types::I32, lhs),
- fx.bcx.ins().sextend(types::I32, rhs),
- )
- } else {
- (lhs, rhs)
- };
-
return codegen_compare_bin_op(fx, bin_op, signed, lhs, rhs);
}
_ => {}
}
BinOp::Shl => {
let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
- let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
- let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
- let val = fx.bcx.ins().ishl(lhs, actual_shift);
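+ // Mask the shift amount to the bit width (like Rust's wrapping shifts); the unmasked amount still feeds the overflow check below.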
+ let masked_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+ let val = fx.bcx.ins().ishl(lhs, masked_shift);
let ty = fx.bcx.func.dfg.value_type(val);
let max_shift = i64::from(ty.bits()) - 1;
let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
}
BinOp::Shr => {
let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
- let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
- let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
+ let masked_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
let val = if !signed {
- fx.bcx.ins().ushr(lhs, actual_shift)
+ fx.bcx.ins().ushr(lhs, masked_shift)
} else {
- fx.bcx.ins().sshr(lhs, actual_shift)
+ fx.bcx.ins().sshr(lhs, masked_shift)
};
let ty = fx.bcx.func.dfg.value_type(val);
let max_shift = i64::from(ty.bits()) - 1;
//! Peephole optimizations that can be performed while creating clif ir.
-use cranelift_codegen::ir::{
- condcodes::IntCC, types, InstBuilder, InstructionData, Opcode, Value, ValueDef,
-};
+use cranelift_codegen::ir::{condcodes::IntCC, InstructionData, Opcode, Value, ValueDef};
use cranelift_frontend::FunctionBuilder;
/// If the given value was produced by a `bint` instruction, return its input, otherwise return the
}
}
-pub(crate) fn make_branchable_value(bcx: &mut FunctionBuilder<'_>, arg: Value) -> Value {
- if bcx.func.dfg.value_type(arg).is_bool() {
- return arg;
- }
-
- (|| {
- let arg_inst = if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
- arg_inst
- } else {
- return None;
- };
-
- match bcx.func.dfg[arg_inst] {
- // This is the lowering of Rvalue::Not
- InstructionData::Load { opcode: Opcode::Load, arg: ptr, flags, offset } => {
- // Using `load.i8 + uextend.i32` would legalize to `uload8 + ireduce.i8 +
- // uextend.i32`. Just `uload8` is much faster.
- match bcx.func.dfg.ctrl_typevar(arg_inst) {
- types::I8 => Some(bcx.ins().uload8(types::I32, flags, ptr, offset)),
- types::I16 => Some(bcx.ins().uload16(types::I32, flags, ptr, offset)),
- _ => None,
- }
- }
- _ => None,
- }
- })()
- .unwrap_or_else(|| {
- match bcx.func.dfg.value_type(arg) {
- types::I8 | types::I16 => {
- // WORKAROUND for brz.i8 and brnz.i8 not yet being implemented
- bcx.ins().uextend(types::I32, arg)
- }
- _ => arg,
- }
- })
-}
-
/// Returns whether the branch is statically known to be taken or `None` if it isn't statically known.
pub(crate) fn maybe_known_branch_taken(
bcx: &FunctionBuilder<'_>,
Linkage::Import,
&Signature {
call_conv: CallConv::triple_default(fx.triple()),
- params: vec![AbiParam::new(pointer_ty(fx.tcx))],
+ params: vec![AbiParam::new(fx.pointer_type)],
returns: vec![AbiParam::new(types::I32)],
},
)
(&ty::Ref(_, a, _), &ty::Ref(_, b, _))
| (&ty::Ref(_, a, _), &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
| (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
- assert!(!fx.layout_of(a).is_unsized());
(src, unsized_info(fx, a, b, old_info))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
let (a, b) = (src_layout.ty.boxed_ty(), dst_layout.ty.boxed_ty());
- assert!(!fx.layout_of(a).is_unsized());
(src, unsized_info(fx, a, b, old_info))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
let (_, unsized_align) =
crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
- let one = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 1);
+ let one = fx.bcx.ins().iconst(fx.pointer_type, 1);
let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
- let zero = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 0);
+ let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
let offset = fx.bcx.ins().band(and_lhs, and_rhs);
}
}
+ /// Like [`CValue::value_field`] except that it handles ADTs containing a single array field,
+ /// so that individual lanes can be accessed.
+ pub(crate) fn value_lane(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ lane_idx: u64,
+ ) -> CValue<'tcx> {
+ let layout = self.1;
+ assert!(layout.ty.is_simd());
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ assert!(lane_idx < lane_count);
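+ // A vector-ABI value can use `extractlane` directly; a by-ref value addresses the lane by byte offset.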
+ match self.0 {
+ CValueInner::ByVal(val) => match layout.abi {
+ Abi::Vector { element: _, count: _ } => {
+ assert!(lane_count <= u8::MAX.into(), "SIMD type with more than 255 lanes???");
+ let lane_idx = u8::try_from(lane_idx).unwrap();
+ let lane = fx.bcx.ins().extractlane(val, lane_idx);
+ CValue::by_val(lane, lane_layout)
+ }
+ _ => unreachable!("value_lane for ByVal with abi {:?}", layout.abi),
+ },
+ CValueInner::ByValPair(_, _) => unreachable!(),
+ CValueInner::ByRef(ptr, None) => {
+ let field_offset = lane_layout.size * lane_idx;
+ let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
+ CValue::by_ref(field_ptr, lane_layout)
+ }
+ CValueInner::ByRef(_, Some(_)) => unreachable!(),
+ }
+ }
+
pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
crate::unsize::coerce_unsized_into(fx, self, dest);
}
&self.inner
}
- pub(crate) fn no_place(layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
- CPlace { inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None), layout }
- }
-
pub(crate) fn new_stack_slot(
fx: &mut FunctionCx<'_, '_, 'tcx>,
layout: TyAndLayout<'tcx>,
) -> CPlace<'tcx> {
assert!(!layout.is_unsized());
if layout.size.bytes() == 0 {
- return CPlace::no_place(layout);
+ return CPlace {
+ inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
+ layout,
+ };
}
let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
}
}
+ /// Like [`CPlace::place_field`] except that it handles ADTs containing a single array field,
+ /// so that individual lanes can be accessed.
+ pub(crate) fn place_lane(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ lane_idx: u64,
+ ) -> CPlace<'tcx> {
+ let layout = self.layout();
+ assert!(layout.ty.is_simd());
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ assert!(lane_idx < lane_count);
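+ // Lanes of a vector-ABI local are tracked symbolically via `VarLane`; memory-backed places use a byte offset.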
+
+ match self.inner {
+ CPlaceInner::Var(local, var) => {
+ assert!(matches!(layout.abi, Abi::Vector { .. }));
+ CPlace {
+ inner: CPlaceInner::VarLane(local, var, lane_idx.try_into().unwrap()),
+ layout: lane_layout,
+ }
+ }
+ CPlaceInner::VarPair(_, _, _) => unreachable!(),
+ CPlaceInner::VarLane(_, _, _) => unreachable!(),
+ CPlaceInner::Addr(ptr, None) => {
+ let field_offset = lane_layout.size * lane_idx;
+ let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
+ CPlace::for_ptr(field_ptr, lane_layout)
+ }
+ CPlaceInner::Addr(_, Some(_)) => unreachable!(),
+ }
+ }
+
pub(crate) fn place_index(
self,
fx: &mut FunctionCx<'_, '_, 'tcx>,
pub(crate) fn drop_fn_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
fx.bcx.ins().load(
- pointer_ty(fx.tcx),
+ fx.pointer_type,
vtable_memflags(),
vtable,
(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE * usize_size) as i32,
pub(crate) fn size_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
fx.bcx.ins().load(
- pointer_ty(fx.tcx),
+ fx.pointer_type,
vtable_memflags(),
vtable,
(ty::COMMON_VTABLE_ENTRIES_SIZE * usize_size) as i32,
pub(crate) fn min_align_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
fx.bcx.ins().load(
- pointer_ty(fx.tcx),
+ fx.pointer_type,
vtable_memflags(),
vtable,
(ty::COMMON_VTABLE_ENTRIES_ALIGN * usize_size) as i32,
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes();
let func_ref = fx.bcx.ins().load(
- pointer_ty(fx.tcx),
+ fx.pointer_type,
vtable_memflags(),
vtable,
(idx * usize_size as usize) as i32,
//! for example:
//!
//! ```shell
-//! $ rustc y.rs -o build/y.bin
-//! $ build/y.bin
+//! $ rustc y.rs -o y.bin
+//! $ ./y.bin
//! ```
//!
//! # Naming
impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> {
fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
- let args_capacity: usize = self.args.iter().map(|arg|
+ // Ignore "extra" args from the call site for C variadic functions.
+ // Only the "fixed" args are part of the LLVM function signature.
+ let args = if self.c_variadic { &self.args[..self.fixed_count] } else { &self.args };
+
+ let args_capacity: usize = args.iter().map(|arg|
if arg.pad.is_some() { 1 } else { 0 } +
if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
).sum();
}
};
- for arg in &self.args {
+ for arg in args {
// add padding
if let Some(ty) = arg.pad {
llargument_tys.push(ty.llvm_type(cx));
.enumerate()
.map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
.collect::<Vec<_>>();
- let ret =
- llvm::LLVMRustBuildCall(llbuilder, callee, args.as_ptr(), args.len() as c_uint, None);
+ let ret = llvm::LLVMRustBuildCall(
+ llbuilder,
+ ty,
+ callee,
+ args.as_ptr(),
+ args.len() as c_uint,
+ None,
+ );
llvm::LLVMSetTailCall(ret, True);
if output.is_some() {
llvm::LLVMBuildRet(llbuilder, ret);
.enumerate()
.map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
.collect::<Vec<_>>();
- let ret = llvm::LLVMRustBuildCall(llbuilder, callee, args.as_ptr(), args.len() as c_uint, None);
+ let ret =
+ llvm::LLVMRustBuildCall(llbuilder, ty, callee, args.as_ptr(), args.len() as c_uint, None);
llvm::LLVMSetTailCall(ret, True);
llvm::LLVMBuildRetVoid(llbuilder);
llvm::LLVMDisposeBuilder(llbuilder);
alignstack,
llvm::AsmDialect::from_generic(dia),
);
- let call = bx.call(v, inputs, None);
+ let call = bx.call(fty, v, inputs, None);
// Store mark in a metadata node so we can map LLVM errors
// back to source locations. See #17552.
fn invoke(
&mut self,
+ llty: &'ll Type,
llfn: &'ll Value,
args: &[&'ll Value],
then: &'ll BasicBlock,
) -> &'ll Value {
debug!("invoke {:?} with args ({:?})", llfn, args);
- let args = self.check_call("invoke", llfn, args);
+ let args = self.check_call("invoke", llty, llfn, args);
let bundle = funclet.map(|funclet| funclet.bundle());
let bundle = bundle.as_ref().map(|b| &*b.raw);
unsafe {
llvm::LLVMRustBuildInvoke(
self.llbuilder,
+ llty,
llfn,
args.as_ptr(),
args.len() as c_uint,
},
};
- let intrinsic = self.get_intrinsic(&name);
- let res = self.call(intrinsic, &[lhs, rhs], None);
+ let res = self.call_intrinsic(name, &[lhs, rhs]);
(self.extract_value(res, 0), self.extract_value(res, 1))
}
let float_width = self.cx.float_width(src_ty);
let int_width = self.cx.int_width(dest_ty);
let name = format!("llvm.fptoui.sat.i{}.f{}", int_width, float_width);
- let intrinsic = self.get_intrinsic(&name);
- return Some(self.call(intrinsic, &[val], None));
+ return Some(self.call_intrinsic(&name, &[val]));
}
None
let float_width = self.cx.float_width(src_ty);
let int_width = self.cx.int_width(dest_ty);
let name = format!("llvm.fptosi.sat.i{}.f{}", int_width, float_width);
- let intrinsic = self.get_intrinsic(&name);
- return Some(self.call(intrinsic, &[val], None));
+ return Some(self.call_intrinsic(&name, &[val]));
}
None
_ => None,
};
if let Some(name) = name {
- let intrinsic = self.get_intrinsic(name);
- return self.call(intrinsic, &[val], None);
+ return self.call_intrinsic(name, &[val]);
}
}
}
_ => None,
};
if let Some(name) = name {
- let intrinsic = self.get_intrinsic(name);
- return self.call(intrinsic, &[val], None);
+ return self.call_intrinsic(name, &[val]);
}
}
}
);
let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
+ let llty = self.cx.type_func(
+ &[self.cx.type_i8p(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()],
+ self.cx.type_void(),
+ );
let args = &[fn_name, hash, num_counters, index];
- let args = self.check_call("call", llfn, args);
+ let args = self.check_call("call", llty, llfn, args);
unsafe {
let _ = llvm::LLVMRustBuildCall(
self.llbuilder,
+ llty,
llfn,
args.as_ptr() as *const &llvm::Value,
args.len() as c_uint,
fn call(
&mut self,
+ llty: &'ll Type,
llfn: &'ll Value,
args: &[&'ll Value],
funclet: Option<&Funclet<'ll>>,
) -> &'ll Value {
debug!("call {:?} with args ({:?})", llfn, args);
- let args = self.check_call("call", llfn, args);
+ let args = self.check_call("call", llty, llfn, args);
let bundle = funclet.map(|funclet| funclet.bundle());
let bundle = bundle.as_ref().map(|b| &*b.raw);
unsafe {
llvm::LLVMRustBuildCall(
self.llbuilder,
+ llty,
llfn,
args.as_ptr() as *const &llvm::Value,
args.len() as c_uint,
fn check_call<'b>(
&mut self,
typ: &str,
+ fn_ty: &'ll Type,
llfn: &'ll Value,
args: &'b [&'ll Value],
) -> Cow<'b, [&'ll Value]> {
- let mut fn_ty = self.cx.val_ty(llfn);
- // Strip off pointers
- while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
- fn_ty = self.cx.element_type(fn_ty);
- }
-
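+ // The function type is now passed in explicitly, so there is no need to peel pointer types off the callee value (which would not work with opaque pointers).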
assert!(
self.cx.type_kind(fn_ty) == TypeKind::Function,
"builder::{} not passed a function, but {:?}",
unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
}
+ crate fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value {
+ let (ty, f) = self.cx.get_intrinsic(intrinsic);
+ self.call(ty, f, args, None)
+ }
+
fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
let size = size.bytes();
if size == 0 {
return;
}
- let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
-
let ptr = self.pointercast(ptr, self.cx.type_i8p());
- self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
+ self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]);
}
pub(crate) fn phi(
eh_personality: Cell<Option<&'ll Value>>,
eh_catch_typeinfo: Cell<Option<&'ll Value>>,
- pub rust_try_fn: Cell<Option<&'ll Value>>,
+ pub rust_try_fn: Cell<Option<(&'ll Type, &'ll Value)>>,
- intrinsics: RefCell<FxHashMap<&'static str, &'ll Value>>,
+ intrinsics: RefCell<FxHashMap<&'static str, (&'ll Type, &'ll Value)>>,
/// A counter that is used for generating local symbol names
local_gen_sym_counter: Cell<usize>,
}
impl CodegenCx<'b, 'tcx> {
- crate fn get_intrinsic(&self, key: &str) -> &'b Value {
+ crate fn get_intrinsic(&self, key: &str) -> (&'b Type, &'b Value) {
if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
return v;
}
name: &'static str,
args: Option<&[&'b llvm::Type]>,
ret: &'b llvm::Type,
- ) -> &'b llvm::Value {
+ ) -> (&'b llvm::Type, &'b llvm::Value) {
let fn_ty = if let Some(args) = args {
self.type_func(args, ret)
} else {
self.type_variadic_func(&[], ret)
};
let f = self.declare_cfn(name, llvm::UnnamedAddr::No, fn_ty);
- self.intrinsics.borrow_mut().insert(name, f);
- f
+ self.intrinsics.borrow_mut().insert(name, (fn_ty, f));
+ (fn_ty, f)
}
- fn declare_intrinsic(&self, key: &str) -> Option<&'b Value> {
+ fn declare_intrinsic(&self, key: &str) -> Option<(&'b Type, &'b Value)> {
macro_rules! ifn {
($name:expr, fn() -> $ret:expr) => (
if key == $name {
-use crate::abi::{Abi, FnAbi, LlvmType, PassMode};
+use crate::abi::{Abi, FnAbi, FnAbiLlvmExt, LlvmType, PassMode};
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm;
use std::cmp::Ordering;
use std::iter;
-fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: Symbol) -> Option<&'ll Value> {
+fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: Symbol) -> Option<(&'ll Type, &'ll Value)> {
let llvm_name = match name {
sym::sqrtf32 => "llvm.sqrt.f32",
sym::sqrtf64 => "llvm.sqrt.f64",
let simple = get_simple_intrinsic(self, name);
let llval = match name {
- _ if simple.is_some() => self.call(
- simple.unwrap(),
- &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
- None,
- ),
- sym::likely => {
- let expect = self.get_intrinsic(&("llvm.expect.i1"));
- self.call(expect, &[args[0].immediate(), self.const_bool(true)], None)
+ _ if simple.is_some() => {
+ let (simple_ty, simple_fn) = simple.unwrap();
+ self.call(
+ simple_ty,
+ simple_fn,
+ &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
+ None,
+ )
}
- sym::unlikely => {
- let expect = self.get_intrinsic(&("llvm.expect.i1"));
- self.call(expect, &[args[0].immediate(), self.const_bool(false)], None)
+ sym::likely => {
+ self.call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(true)])
}
+ sym::unlikely => self
+ .call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(false)]),
kw::Try => {
try_intrinsic(
self,
);
return;
}
- sym::breakpoint => {
- let llfn = self.get_intrinsic(&("llvm.debugtrap"));
- self.call(llfn, &[], None)
- }
+ sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[]),
sym::va_copy => {
- let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
- self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)
+ self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
}
sym::va_arg => {
match fn_abi.ret.layout.abi {
| sym::prefetch_write_data
| sym::prefetch_read_instruction
| sym::prefetch_write_instruction => {
- let expect = self.get_intrinsic(&("llvm.prefetch"));
let (rw, cache_type) = match name {
sym::prefetch_read_data => (0, 1),
sym::prefetch_write_data => (1, 1),
sym::prefetch_write_instruction => (1, 0),
_ => bug!(),
};
- self.call(
- expect,
+ self.call_intrinsic(
+ "llvm.prefetch",
&[
args[0].immediate(),
self.const_i32(rw),
args[1].immediate(),
self.const_i32(cache_type),
],
- None,
)
}
sym::ctlz
Some((width, signed)) => match name {
sym::ctlz | sym::cttz => {
let y = self.const_bool(false);
- let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width));
- self.call(llfn, &[args[0].immediate(), y], None)
+ self.call_intrinsic(
+ &format!("llvm.{}.i{}", name, width),
+ &[args[0].immediate(), y],
+ )
}
sym::ctlz_nonzero | sym::cttz_nonzero => {
let y = self.const_bool(true);
let llvm_name = &format!("llvm.{}.i{}", &name_str[..4], width);
- let llfn = self.get_intrinsic(llvm_name);
- self.call(llfn, &[args[0].immediate(), y], None)
+ self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
}
- sym::ctpop => self.call(
- self.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
+ sym::ctpop => self.call_intrinsic(
+ &format!("llvm.ctpop.i{}", width),
&[args[0].immediate()],
- None,
),
sym::bswap => {
if width == 8 {
args[0].immediate() // byte swap a u8/i8 is just a no-op
} else {
- self.call(
- self.get_intrinsic(&format!("llvm.bswap.i{}", width)),
+ self.call_intrinsic(
+ &format!("llvm.bswap.i{}", width),
&[args[0].immediate()],
- None,
)
}
}
- sym::bitreverse => self.call(
- self.get_intrinsic(&format!("llvm.bitreverse.i{}", width)),
+ sym::bitreverse => self.call_intrinsic(
+ &format!("llvm.bitreverse.i{}", width),
&[args[0].immediate()],
- None,
),
sym::rotate_left | sym::rotate_right => {
let is_left = name == sym::rotate_left;
// rotate = funnel shift with first two args the same
let llvm_name =
&format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);
- let llfn = self.get_intrinsic(llvm_name);
- self.call(llfn, &[val, val, raw_shift], None)
+ self.call_intrinsic(llvm_name, &[val, val, raw_shift])
}
sym::saturating_add | sym::saturating_sub => {
let is_add = name == sym::saturating_add;
if is_add { "add" } else { "sub" },
width
);
- let llfn = self.get_intrinsic(llvm_name);
- self.call(llfn, &[lhs, rhs], None)
+ self.call_intrinsic(llvm_name, &[lhs, rhs])
}
_ => bug!(),
},
let a_ptr = self.bitcast(a, i8p_ty);
let b_ptr = self.bitcast(b, i8p_ty);
let n = self.const_usize(layout.size.bytes());
- let llfn = self.get_intrinsic("memcmp");
- let cmp = self.call(llfn, &[a_ptr, b_ptr, n], None);
+ let cmp = self.call_intrinsic("memcmp", &[a_ptr, b_ptr, n]);
self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
}
}
}
fn abort(&mut self) {
- let fnname = self.get_intrinsic(&("llvm.trap"));
- self.call(fnname, &[], None);
+ self.call_intrinsic("llvm.trap", &[]);
}
fn assume(&mut self, val: Self::Value) {
- let assume_intrinsic = self.get_intrinsic("llvm.assume");
- self.call(assume_intrinsic, &[val], None);
+ self.call_intrinsic("llvm.assume", &[val]);
}
fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
- let expect = self.get_intrinsic(&"llvm.expect.i1");
- self.call(expect, &[cond, self.const_bool(expected)], None)
+ self.call_intrinsic("llvm.expect.i1", &[cond, self.const_bool(expected)])
}
fn sideeffect(&mut self) {
// caller of this function is in `rustc_codegen_ssa`, which is agnostic to whether LLVM
// codegen backend being used, and so is unable to check the LLVM version.
if unsafe { llvm::LLVMRustVersionMajor() } < 12 {
- let fnname = self.get_intrinsic(&("llvm.sideeffect"));
- self.call(fnname, &[], None);
+ self.call_intrinsic("llvm.sideeffect", &[]);
}
}
fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
- let intrinsic = self.cx().get_intrinsic("llvm.va_start");
- self.call(intrinsic, &[va_list], None)
+ self.call_intrinsic("llvm.va_start", &[va_list])
}
fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
- let intrinsic = self.cx().get_intrinsic("llvm.va_end");
- self.call(intrinsic, &[va_list], None)
+ self.call_intrinsic("llvm.va_end", &[va_list])
}
}
dest: &'ll Value,
) {
if bx.sess().panic_strategy() == PanicStrategy::Abort {
- bx.call(try_func, &[data], None);
+ let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ bx.call(try_func_ty, try_func, &[data], None);
// Return 0 unconditionally from the intrinsic call;
// we can never unwind.
let ret_align = bx.tcx().data_layout.i32_align.abi;
catch_func: &'ll Value,
dest: &'ll Value,
) {
- let llfn = get_rust_try_fn(bx, &mut |mut bx| {
+ let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
bx.set_personality_fn(bx.eh_personality());
let mut normal = bx.build_sibling_block("normal");
// More information can be found in libstd's seh.rs implementation.
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let slot = bx.alloca(bx.type_i8p(), ptr_align);
- bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);
+ let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ bx.invoke(try_func_ty, try_func, &[data], normal.llbb(), catchswitch.llbb(), None);
normal.ret(bx.const_i32(0));
let flags = bx.const_i32(8);
let funclet = catchpad_rust.catch_pad(cs, &[tydesc, flags, slot]);
let ptr = catchpad_rust.load(bx.type_i8p(), slot, ptr_align);
- catchpad_rust.call(catch_func, &[data, ptr], Some(&funclet));
+ let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+ catchpad_rust.call(catch_ty, catch_func, &[data, ptr], Some(&funclet));
catchpad_rust.catch_ret(&funclet, caught.llbb());
// The flag value of 64 indicates a "catch-all".
let flags = bx.const_i32(64);
let null = bx.const_null(bx.type_i8p());
let funclet = catchpad_foreign.catch_pad(cs, &[null, flags, null]);
- catchpad_foreign.call(catch_func, &[data, null], Some(&funclet));
+ catchpad_foreign.call(catch_ty, catch_func, &[data, null], Some(&funclet));
catchpad_foreign.catch_ret(&funclet, caught.llbb());
caught.ret(bx.const_i32(1));
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
- let ret = bx.call(llfn, &[try_func, data, catch_func], None);
+ let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
catch_func: &'ll Value,
dest: &'ll Value,
) {
- let llfn = get_rust_try_fn(bx, &mut |mut bx| {
+ let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
// Codegens the shims described above:
//
// bx:
let try_func = llvm::get_param(bx.llfn(), 0);
let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2);
- bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
+ let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ bx.invoke(try_func_ty, try_func, &[data], then.llbb(), catch.llbb(), None);
then.ret(bx.const_i32(0));
// Type indicator for the exception being thrown.
let tydesc = bx.const_null(bx.type_i8p());
catch.add_clause(vals, tydesc);
let ptr = catch.extract_value(vals, 0);
- catch.call(catch_func, &[data, ptr], None);
+ let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+ catch.call(catch_ty, catch_func, &[data, ptr], None);
catch.ret(bx.const_i32(1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
- let ret = bx.call(llfn, &[try_func, data, catch_func], None);
+ let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
catch_func: &'ll Value,
dest: &'ll Value,
) {
- let llfn = get_rust_try_fn(bx, &mut |mut bx| {
+ let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
// Codegens the shims described above:
//
// bx:
let try_func = llvm::get_param(bx.llfn(), 0);
let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2);
- bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
+ let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ bx.invoke(try_func_ty, try_func, &[data], then.llbb(), catch.llbb(), None);
then.ret(bx.const_i32(0));
// Type indicator for the exception being thrown.
let selector = catch.extract_value(vals, 1);
// Check if the typeid we got is the one for a Rust panic.
- let llvm_eh_typeid_for = bx.get_intrinsic("llvm.eh.typeid.for");
- let rust_typeid = catch.call(llvm_eh_typeid_for, &[tydesc], None);
+ let rust_typeid = catch.call_intrinsic("llvm.eh.typeid.for", &[tydesc]);
let is_rust_panic = catch.icmp(IntPredicate::IntEQ, selector, rust_typeid);
let is_rust_panic = catch.zext(is_rust_panic, bx.type_bool());
catch.store(is_rust_panic, catch_data_1, i8_align);
let catch_data = catch.bitcast(catch_data, bx.type_i8p());
- catch.call(catch_func, &[data, catch_data], None);
+ let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+ catch.call(catch_ty, catch_func, &[data, catch_data], None);
catch.ret(bx.const_i32(1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
- let ret = bx.call(llfn, &[try_func, data, catch_func], None);
+ let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
name: &str,
rust_fn_sig: ty::PolyFnSig<'tcx>,
codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
-) -> &'ll Value {
+) -> (&'ll Type, &'ll Value) {
let fn_abi = FnAbi::of_fn_ptr(cx, rust_fn_sig, &[]);
+ let llty = fn_abi.llvm_type(cx);
let llfn = cx.declare_fn(name, &fn_abi);
cx.set_frame_pointer_type(llfn);
cx.apply_target_cpu_attr(llfn);
let llbb = Builder::append_block(cx, llfn, "entry-block");
let bx = Builder::build(cx, llbb);
codegen(bx);
- llfn
+ (llty, llfn)
}
// Helper function used to get a handle to the `__rust_try` function used to
fn get_rust_try_fn<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
-) -> &'ll Value {
+) -> (&'ll Type, &'ll Value) {
if let Some(llfn) = cx.rust_try_fn.get() {
return llfn;
}
};
let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
let f = bx.declare_cfn(&llvm_name, llvm::UnnamedAddr::No, fn_ty);
- let c = bx.call(f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
+ let c =
+ bx.call(fn_ty, f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
Ok(c)
}
let llvm_intrinsic =
format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
- let f = bx.declare_cfn(
- &llvm_intrinsic,
- llvm::UnnamedAddr::No,
- bx.type_func(
- &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
- llvm_elem_vec_ty,
- ),
+ let fn_ty = bx.type_func(
+ &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
+ llvm_elem_vec_ty,
);
- let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
+ let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+ let v =
+ bx.call(fn_ty, f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
return Ok(v);
}
let llvm_intrinsic =
format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
- let f = bx.declare_cfn(
- &llvm_intrinsic,
- llvm::UnnamedAddr::No,
- bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t),
- );
- let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
+ let fn_ty =
+ bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t);
+ let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+ let v =
+ bx.call(fn_ty, f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
return Ok(v);
}
);
let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
- let f = bx.declare_cfn(
- &llvm_intrinsic,
- llvm::UnnamedAddr::No,
- bx.type_func(&[vec_ty, vec_ty], vec_ty),
- );
- let v = bx.call(f, &[lhs, rhs], None);
+ let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty);
+ let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+ let v = bx.call(fn_ty, f, &[lhs, rhs], None);
return Ok(v);
}
) -> &'a Value;
pub fn LLVMRustBuildInvoke(
B: &Builder<'a>,
+ Ty: &'a Type,
Fn: &'a Value,
Args: *const &'a Value,
NumArgs: c_uint,
pub fn LLVMRustGetInstrProfIncrementIntrinsic(M: &Module) -> &'a Value;
pub fn LLVMRustBuildCall(
B: &Builder<'a>,
+ Ty: &'a Type,
Fn: &'a Value,
Args: *const &'a Value,
NumArgs: c_uint,
}
fn element_type(&self, ty: &'ll Type) -> &'ll Type {
- unsafe { llvm::LLVMGetElementType(ty) }
+ match self.type_kind(ty) {
+ TypeKind::Array | TypeKind::Vector => unsafe { llvm::LLVMGetElementType(ty) },
+ TypeKind::Pointer => bug!("element_type is not supported for opaque pointers"),
+ other => bug!("element_type called on unsupported type {:?}", other),
+ }
}
fn vector_length(&self, ty: &'ll Type) -> usize {
fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type {
ty.llvm_type(self)
}
+ fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
+ fn_abi.llvm_type(self)
+ }
fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
fn_abi.ptr_to_llvm_type(self)
}
bx.insert_reference_to_gdb_debug_scripts_section_global();
+ let isize_ty = cx.type_isize();
+ let i8pp_ty = cx.type_ptr_to(cx.type_i8p());
let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);
- let (start_fn, args) = if use_start_lang_item {
+ let (start_fn, start_ty, args) = if use_start_lang_item {
let start_def_id = cx.tcx().require_lang_item(LangItem::Start, None);
let start_fn = cx.get_fn_addr(
ty::Instance::resolve(
.unwrap()
.unwrap(),
);
- (
- start_fn,
- vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())), arg_argc, arg_argv],
- )
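+ // Spell out the start lang item's signature, fn(main, argc, argv) -> isize, as an explicit function type for the typed call below.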
+ let start_ty = cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty], isize_ty);
+ (start_fn, start_ty, vec![rust_main, arg_argc, arg_argv])
} else {
debug!("using user-defined start fn");
- (rust_main, vec![arg_argc, arg_argv])
+ let start_ty = cx.type_func(&[isize_ty, i8pp_ty], isize_ty);
+ (rust_main, start_ty, vec![arg_argc, arg_argv])
};
- let result = bx.call(start_fn, &args, None);
+ let result = bx.call(start_ty, start_fn, &args, None);
let cast = bx.intcast(result, cx.type_int(), true);
bx.ret(cast);
) {
// If there is a cleanup block and the function we're calling can unwind, then
// do an invoke, otherwise do a call.
+ let fn_ty = bx.fn_decl_backend_type(&fn_abi);
if let Some(cleanup) = cleanup.filter(|_| fn_abi.can_unwind) {
let ret_llbb = if let Some((_, target)) = destination {
fx.llbb(target)
} else {
fx.unreachable_block()
};
- let invokeret =
- bx.invoke(fn_ptr, &llargs, ret_llbb, self.llblock(fx, cleanup), self.funclet(fx));
+ let invokeret = bx.invoke(
+ fn_ty,
+ fn_ptr,
+ &llargs,
+ ret_llbb,
+ self.llblock(fx, cleanup),
+ self.funclet(fx),
+ );
bx.apply_attrs_callsite(&fn_abi, invokeret);
if let Some((ret_dest, target)) = destination {
fx.store_return(&mut ret_bx, ret_dest, &fn_abi.ret, invokeret);
}
} else {
- let llret = bx.call(fn_ptr, &llargs, self.funclet(fx));
+ let llret = bx.call(fn_ty, fn_ptr, &llargs, self.funclet(fx));
bx.apply_attrs_callsite(&fn_abi, llret);
if fx.mir[self.bb].is_cleanup {
// Cleanup is always the cold path. Don't inline
};
let instance = ty::Instance::mono(bx.tcx(), def_id);
let r = bx.cx().get_fn_addr(instance);
- let call = bx.call(r, &[llsize, llalign], None);
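+ // The allocation function takes (size, align) as machine-sized integers and returns a raw byte pointer; build that callee type explicitly.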
+ let ty = bx.type_func(&[bx.type_isize(), bx.type_isize()], bx.type_i8p());
+ let call = bx.call(ty, r, &[llsize, llalign], None);
let val = bx.pointercast(call, llty_ptr);
let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
);
fn invoke(
&mut self,
+ llty: Self::Type,
llfn: Self::Value,
args: &[Self::Value],
then: Self::BasicBlock,
fn call(
&mut self,
+ llty: Self::Type,
llfn: Self::Value,
args: &[Self::Value],
funclet: Option<&Self::Funclet>,
pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Self::Type;
fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type;
+ fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Type;
fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Type;
fn reg_backend_type(&self, ty: &Reg) -> Self::Type;
fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Self::Type;
E0781: include_str!("./error_codes/E0781.md"),
E0782: include_str!("./error_codes/E0782.md"),
E0783: include_str!("./error_codes/E0783.md"),
+E0784: include_str!("./error_codes/E0784.md"),
;
// E0006, // merged with E0005
// E0008, // cannot bind by-move into a pattern guard
E0711, // a feature has been declared with conflicting stability attributes
E0717, // rustc_promotable without stability attribute
// E0721, // `await` keyword
-// E0723, unstable feature in `const` context
+// E0723, unstable feature in `const` context
E0726, // non-explicit (not `'_`) elided lifetime in unsupported position
// E0738, // Removed; errored on `#[track_caller] fn`s in `extern "Rust" { ... }`.
E0772, // `'static' obligation coming from `impl dyn Trait {}` or `impl Foo for dyn Bar {}`.
--- /dev/null
+A union expression does not have exactly one field.
+
+Erroneous code example:
+
+```compile_fail,E0784
+union Bird {
+ pigeon: u8,
+ turtledove: u16,
+}
+
+let bird = Bird {}; // error
+let bird = Bird { pigeon: 0, turtledove: 1 }; // error
+```
+
+The key property of unions is that all fields of a union share common storage.
+As a result, writes to one field of a union can overwrite its other fields, and
+the size of a union is determined by the size of its largest field.
+
+You can find more information about union types in the [Rust reference].
+
+Working example:
+
+```
+union Bird {
+ pigeon: u8,
+ turtledove: u16,
+}
+
+let bird = Bird { pigeon: 0 }; // OK
+```
+
+[Rust reference]: https://doc.rust-lang.org/reference/items/unions.html
self.cx.force_mode = orig_force_mode;
// Finally incorporate all the expanded macros into the input AST fragment.
- let mut placeholder_expander = PlaceholderExpander::new(self.cx, self.monotonic);
+ let mut placeholder_expander = PlaceholderExpander::default();
while let Some(expanded_fragments) = expanded_fragments.pop() {
for (expn_id, expanded_fragment) in expanded_fragments.into_iter().rev() {
placeholder_expander
}
}
- // The placeholder expander gives ids to statements, so we avoid folding the id here.
// We don't use `assign_id!` - it will be called when we visit the statement's contents
// (e.g. an expression, item, or local)
- let ast::Stmt { id, kind, span } = stmt;
- let res = noop_flat_map_stmt_kind(kind, self)
- .into_iter()
- .map(|kind| ast::Stmt { id, kind, span })
- .collect();
+ let res = noop_flat_map_stmt(stmt, self);
self.cx.current_expansion.is_trailing_mac = false;
res
#![feature(proc_macro_internals)]
#![feature(proc_macro_span)]
#![feature(try_blocks)]
+#![recursion_limit = "256"]
#[macro_use]
extern crate rustc_macros;
-use crate::base::ExtCtxt;
use crate::expand::{AstFragment, AstFragmentKind};
use rustc_ast as ast;
}
}
-pub struct PlaceholderExpander<'a, 'b> {
+#[derive(Default)]
+pub struct PlaceholderExpander {
expanded_fragments: FxHashMap<ast::NodeId, AstFragment>,
- cx: &'a mut ExtCtxt<'b>,
- monotonic: bool,
}
-impl<'a, 'b> PlaceholderExpander<'a, 'b> {
- pub fn new(cx: &'a mut ExtCtxt<'b>, monotonic: bool) -> Self {
- PlaceholderExpander { cx, expanded_fragments: FxHashMap::default(), monotonic }
- }
-
+impl PlaceholderExpander {
pub fn add(&mut self, id: ast::NodeId, mut fragment: AstFragment) {
fragment.mut_visit_with(self);
self.expanded_fragments.insert(id, fragment);
}
}
-impl<'a, 'b> MutVisitor for PlaceholderExpander<'a, 'b> {
+impl MutVisitor for PlaceholderExpander {
fn flat_map_arm(&mut self, arm: ast::Arm) -> SmallVec<[ast::Arm; 1]> {
if arm.is_placeholder {
self.remove(arm.id).make_arms()
_ => noop_visit_ty(ty, self),
}
}
-
- fn visit_block(&mut self, block: &mut P<ast::Block>) {
- noop_visit_block(block, self);
-
- for stmt in block.stmts.iter_mut() {
- if self.monotonic {
- assert_eq!(stmt.id, ast::DUMMY_NODE_ID);
- stmt.id = self.cx.resolver.next_node_id();
- }
- }
- }
}
"detects deprecation attributes with no effect",
}
+declare_lint! {
+ /// The `undefined_naked_function_abi` lint detects naked function definitions that
+ /// either do not specify an ABI or specify the Rust ABI.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![feature(naked_functions)]
+ /// #![feature(asm)]
+ ///
+ /// #[naked]
+ /// pub fn default_abi() -> u32 {
+ /// unsafe { asm!("", options(noreturn)); }
+ /// }
+ ///
+ /// #[naked]
+ /// pub extern "Rust" fn rust_abi() -> u32 {
+ /// unsafe { asm!("", options(noreturn)); }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The Rust ABI is currently undefined. Therefore, naked functions should
+ /// specify a non-Rust ABI.
+ pub UNDEFINED_NAKED_FUNCTION_ABI,
+ Warn,
+ "undefined naked function ABI"
+}
+
declare_lint! {
/// The `unsupported_naked_functions` lint detects naked function
/// definitions that are unsupported but were previously accepted.
/// #![feature(naked_functions)]
///
/// #[naked]
- /// pub fn f() -> u32 {
+ /// pub extern "C" fn f() -> u32 {
/// 42
/// }
/// ```
Oz,
};
-static PassBuilder::OptimizationLevel fromRust(LLVMRustPassBuilderOptLevel Level) {
+#if LLVM_VERSION_LT(14,0)
+using OptimizationLevel = PassBuilder::OptimizationLevel;
+#endif
+
+static OptimizationLevel fromRust(LLVMRustPassBuilderOptLevel Level) {
switch (Level) {
case LLVMRustPassBuilderOptLevel::O0:
- return PassBuilder::OptimizationLevel::O0;
+ return OptimizationLevel::O0;
case LLVMRustPassBuilderOptLevel::O1:
- return PassBuilder::OptimizationLevel::O1;
+ return OptimizationLevel::O1;
case LLVMRustPassBuilderOptLevel::O2:
- return PassBuilder::OptimizationLevel::O2;
+ return OptimizationLevel::O2;
case LLVMRustPassBuilderOptLevel::O3:
- return PassBuilder::OptimizationLevel::O3;
+ return OptimizationLevel::O3;
case LLVMRustPassBuilderOptLevel::Os:
- return PassBuilder::OptimizationLevel::Os;
+ return OptimizationLevel::Os;
case LLVMRustPassBuilderOptLevel::Oz:
- return PassBuilder::OptimizationLevel::Oz;
+ return OptimizationLevel::Oz;
default:
report_fatal_error("Bad PassBuilderOptLevel.");
}
const char *ExtraPasses, size_t ExtraPassesLen) {
Module *TheModule = unwrap(ModuleRef);
TargetMachine *TM = unwrap(TMRef);
- PassBuilder::OptimizationLevel OptLevel = fromRust(OptLevelRust);
+ OptimizationLevel OptLevel = fromRust(OptLevelRust);
PipelineTuningOptions PTO;
// We manually collect pipeline callbacks so we can apply them at O0, where the
// PassBuilder does not create a pipeline.
- std::vector<std::function<void(ModulePassManager &, PassBuilder::OptimizationLevel)>>
+ std::vector<std::function<void(ModulePassManager &, OptimizationLevel)>>
PipelineStartEPCallbacks;
#if LLVM_VERSION_GE(11, 0)
- std::vector<std::function<void(ModulePassManager &, PassBuilder::OptimizationLevel)>>
+ std::vector<std::function<void(ModulePassManager &, OptimizationLevel)>>
OptimizerLastEPCallbacks;
#else
- std::vector<std::function<void(FunctionPassManager &, PassBuilder::OptimizationLevel)>>
+ std::vector<std::function<void(FunctionPassManager &, OptimizationLevel)>>
OptimizerLastEPCallbacks;
#endif
if (VerifyIR) {
PipelineStartEPCallbacks.push_back(
- [VerifyIR](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [VerifyIR](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(VerifierPass());
}
);
if (InstrumentGCOV) {
PipelineStartEPCallbacks.push_back(
- [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(GCOVProfilerPass(GCOVOptions::getDefault()));
}
);
if (InstrumentCoverage) {
PipelineStartEPCallbacks.push_back(
- [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
InstrProfOptions Options;
MPM.addPass(InstrProfiling(Options, false));
}
/*CompileKernel=*/false);
#if LLVM_VERSION_GE(11, 0)
OptimizerLastEPCallbacks.push_back(
- [Options](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [Options](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(MemorySanitizerPass(Options));
MPM.addPass(createModuleToFunctionPassAdaptor(MemorySanitizerPass(Options)));
}
);
#else
PipelineStartEPCallbacks.push_back(
- [Options](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [Options](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(MemorySanitizerPass(Options));
}
);
OptimizerLastEPCallbacks.push_back(
- [Options](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
+ [Options](FunctionPassManager &FPM, OptimizationLevel Level) {
FPM.addPass(MemorySanitizerPass(Options));
}
);
if (SanitizerOptions->SanitizeThread) {
#if LLVM_VERSION_GE(11, 0)
OptimizerLastEPCallbacks.push_back(
- [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(ThreadSanitizerPass());
MPM.addPass(createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
}
);
#else
PipelineStartEPCallbacks.push_back(
- [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(ThreadSanitizerPass());
}
);
OptimizerLastEPCallbacks.push_back(
- [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
+ [](FunctionPassManager &FPM, OptimizationLevel Level) {
FPM.addPass(ThreadSanitizerPass());
}
);
if (SanitizerOptions->SanitizeAddress) {
#if LLVM_VERSION_GE(11, 0)
OptimizerLastEPCallbacks.push_back(
- [SanitizerOptions](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [SanitizerOptions](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
MPM.addPass(ModuleAddressSanitizerPass(
/*CompileKernel=*/false, SanitizerOptions->SanitizeAddressRecover));
);
#else
PipelineStartEPCallbacks.push_back(
- [&](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [&](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
}
);
OptimizerLastEPCallbacks.push_back(
- [SanitizerOptions](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
+ [SanitizerOptions](FunctionPassManager &FPM, OptimizationLevel Level) {
FPM.addPass(AddressSanitizerPass(
/*CompileKernel=*/false, SanitizerOptions->SanitizeAddressRecover,
/*UseAfterScope=*/true));
}
);
PipelineStartEPCallbacks.push_back(
- [SanitizerOptions](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [SanitizerOptions](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(ModuleAddressSanitizerPass(
/*CompileKernel=*/false, SanitizerOptions->SanitizeAddressRecover));
}
if (SanitizerOptions->SanitizeHWAddress) {
#if LLVM_VERSION_GE(11, 0)
OptimizerLastEPCallbacks.push_back(
- [SanitizerOptions](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [SanitizerOptions](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(HWAddressSanitizerPass(
/*CompileKernel=*/false, SanitizerOptions->SanitizeHWAddressRecover));
}
);
#else
PipelineStartEPCallbacks.push_back(
- [SanitizerOptions](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [SanitizerOptions](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(HWAddressSanitizerPass(
/*CompileKernel=*/false, SanitizerOptions->SanitizeHWAddressRecover));
}
#endif
bool NeedThinLTOBufferPasses = UseThinLTOBuffers;
if (!NoPrepopulatePasses) {
- if (OptLevel == PassBuilder::OptimizationLevel::O0) {
+ if (OptLevel == OptimizationLevel::O0) {
#if LLVM_VERSION_GE(12, 0)
for (const auto &C : PipelineStartEPCallbacks)
PB.registerPipelineStartEPCallback(C);
delete Bundle;
}
-extern "C" LLVMValueRef LLVMRustBuildCall(LLVMBuilderRef B, LLVMValueRef Fn,
+extern "C" LLVMValueRef LLVMRustBuildCall(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
LLVMValueRef *Args, unsigned NumArgs,
OperandBundleDef *Bundle) {
Value *Callee = unwrap(Fn);
- FunctionType *FTy = cast<FunctionType>(Callee->getType()->getPointerElementType());
+ FunctionType *FTy = unwrap<FunctionType>(Ty);
unsigned Len = Bundle ? 1 : 0;
ArrayRef<OperandBundleDef> Bundles = makeArrayRef(Bundle, Len);
return wrap(unwrap(B)->CreateCall(
}
extern "C" LLVMValueRef
-LLVMRustBuildInvoke(LLVMBuilderRef B, LLVMValueRef Fn, LLVMValueRef *Args,
- unsigned NumArgs, LLVMBasicBlockRef Then,
- LLVMBasicBlockRef Catch, OperandBundleDef *Bundle,
- const char *Name) {
+LLVMRustBuildInvoke(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
+ OperandBundleDef *Bundle, const char *Name) {
Value *Callee = unwrap(Fn);
- FunctionType *FTy = cast<FunctionType>(Callee->getType()->getPointerElementType());
+ FunctionType *FTy = unwrap<FunctionType>(Ty);
unsigned Len = Bundle ? 1 : 0;
ArrayRef<OperandBundleDef> Bundles = makeArrayRef(Bundle, Len);
return wrap(unwrap(B)->CreateInvoke(FTy, Callee, unwrap(Then), unwrap(Catch),
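Both shims now take the callee's function type as an explicit `Ty` parameter instead of recovering it with `getPointerElementType()`, which stops working once LLVM switches to opaque pointers. The Rust-side FFI declarations must grow the same parameter; a minimal sketch of the shape, using opaque stand-in types rather than the real `rustc_codegen_llvm` bindings:

    use std::os::raw::{c_char, c_uint, c_void};

    // Opaque stand-ins for LLVM C-API handles (illustrative only).
    #[repr(C)]
    pub struct Builder { _opaque: [u8; 0] }
    #[repr(C)]
    pub struct Type { _opaque: [u8; 0] }
    #[repr(C)]
    pub struct Value { _opaque: [u8; 0] }
    #[repr(C)]
    pub struct BasicBlock { _opaque: [u8; 0] }

    extern "C" {
        // `ty` is the callee's function type, now supplied by the caller
        // instead of being read out of the function pointer's pointee type.
        fn LLVMRustBuildCall(
            b: *mut Builder,
            ty: *mut Type,
            f: *mut Value,
            args: *mut *mut Value,
            num_args: c_uint,
            bundle: *mut c_void,
        ) -> *mut Value;

        fn LLVMRustBuildInvoke(
            b: *mut Builder,
            ty: *mut Type,
            f: *mut Value,
            args: *mut *mut Value,
            num_args: c_uint,
            then: *mut BasicBlock,
            catch: *mut BasicBlock,
            bundle: *mut c_void,
            name: *const c_char,
        ) -> *mut Value;
    }

    fn main() {}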
modifier `-bundle` with library kind `static`",
)
.emit();
+ if !self.tcx.features().static_nobundle {
+ feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::static_nobundle,
+ item.span(),
+ "kind=\"static-nobundle\" is unstable",
+ )
+ .emit();
+ }
NativeLibKind::Static { bundle: Some(false), whole_archive: None }
}
"dylib" => NativeLibKind::Dylib { as_needed: None },
)
.emit();
}
- if matches!(lib.kind, NativeLibKind::Static { bundle: Some(false), .. })
- && !self.tcx.features().static_nobundle
- {
- feature_err(
- &self.tcx.sess.parse_sess,
- sym::static_nobundle,
- span.unwrap_or(rustc_span::DUMMY_SP),
- "kind=\"static-nobundle\" is unstable",
- )
- .emit();
- }
// this just unwraps lib.name; we already established that it isn't empty above.
if let (NativeLibKind::RawDylib, Some(lib_name)) = (lib.kind, lib.name) {
let span = match span {
| DefKind::AnonConst
| DefKind::OpaqueTy
| DefKind::Impl
+ | DefKind::Field
+ | DefKind::TyParam
| DefKind::Closure
| DefKind::Generator => true,
DefKind::Mod
- | DefKind::Field
| DefKind::ForeignMod
- | DefKind::TyParam
| DefKind::ConstParam
| DefKind::Macro(..)
| DefKind::Use
#![feature(iter_zip)]
#![feature(thread_local_const_init)]
#![feature(try_reserve)]
+#![feature(try_reserve_kind)]
#![feature(nonzero_ops)]
#![recursion_limit = "512"]
#![feature(once_cell)]
#![feature(control_flow_enum)]
#![feature(try_reserve)]
+#![feature(try_reserve_kind)]
#![recursion_limit = "256"]
#[macro_use]
pub(super) fn parse_fn_front_matter(&mut self) -> PResult<'a, FnHeader> {
let sp_start = self.token.span;
let constness = self.parse_constness();
+
+ let async_start_sp = self.token.span;
let asyncness = self.parse_asyncness();
+
+ let unsafe_start_sp = self.token.span;
let unsafety = self.parse_unsafety();
+
+ let ext_start_sp = self.token.span;
let ext = self.parse_extern();
if let Async::Yes { span, .. } = asyncness {
Ok(true) => {}
Ok(false) => unreachable!(),
Err(mut err) => {
+ // Qualifier keyword ordering check.
+
+ // Recording the start spans above lets the machine-applicable fix
+ // place the misordered keyword in the correct position.
+ let current_qual_sp = if self.check_keyword(kw::Const) {
+ Some(async_start_sp)
+ } else if self.check_keyword(kw::Async) {
+ Some(unsafe_start_sp)
+ } else if self.check_keyword(kw::Unsafe) {
+ Some(ext_start_sp)
+ } else {
+ None
+ };
+
+ if let Some(current_qual_sp) = current_qual_sp {
+ let current_qual_sp = current_qual_sp.to(self.prev_token.span);
+ if let Ok(current_qual) = self.span_to_snippet(current_qual_sp) {
+ let invalid_qual_sp = self.token.uninterpolated_span();
+ let invalid_qual = self.span_to_snippet(invalid_qual_sp).unwrap();
+
+ err.span_suggestion(
+ current_qual_sp.to(invalid_qual_sp),
+ &format!("`{}` must come before `{}`", invalid_qual, current_qual),
+ format!("{} {}", invalid_qual, current_qual),
+ Applicability::MachineApplicable,
+ ).note("keyword order for functions declaration is `default`, `pub`, `const`, `async`, `unsafe`, `extern`");
+ }
+ }
// Recover incorrect visibility order such as `async pub`.
- if self.check_keyword(kw::Pub) {
+ else if self.check_keyword(kw::Pub) {
let sp = sp_start.to(self.prev_token.span);
if let Ok(snippet) = self.span_to_snippet(sp) {
let vis = match self.parse_visibility(FollowedByType::No) {
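The spans recorded at the top of `parse_fn_front_matter` feed a machine-applicable suggestion whenever a qualifier appears after one it should precede in the `const`, `async`, `unsafe`, `extern` order. A sketch of the two forms; the commented-out first one is rejected with a swap suggestion:

    // Rejected: the parser now suggests `async unsafe`, because `async`
    // must come before `unsafe`.
    // unsafe async fn poll() {}

    // Accepted qualifier order (edition 2018 or later):
    async unsafe fn poll() {}

    fn main() {}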
use rustc_hir::{ExprKind, HirId, InlineAsmOperand, StmtKind};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::TyCtxt;
+use rustc_session::lint::builtin::UNDEFINED_NAKED_FUNCTION_ABI;
use rustc_session::lint::builtin::UNSUPPORTED_NAKED_FUNCTIONS;
use rustc_span::symbol::sym;
use rustc_span::Span;
/// Checks that function uses non-Rust ABI.
fn check_abi(tcx: TyCtxt<'_>, hir_id: HirId, abi: Abi, fn_ident_span: Span) {
if abi == Abi::Rust {
- tcx.struct_span_lint_hir(UNSUPPORTED_NAKED_FUNCTIONS, hir_id, fn_ident_span, |lint| {
+ tcx.struct_span_lint_hir(UNDEFINED_NAKED_FUNCTION_ABI, hir_id, fn_ident_span, |lint| {
lint.build("Rust ABI is unsupported in naked functions").emit();
});
}
/// similarly named label and whether or not it is reachable.
crate type LabelSuggestion = (Ident, bool);
+crate enum SuggestionTarget {
+ /// The target's name is similar to the name the programmer used (probably a typo)
+ SimilarlyNamed,
+ /// The target is the only valid item that can be used in the corresponding context
+ SingleItem,
+}
+
crate struct TypoSuggestion {
pub candidate: Symbol,
pub res: Res,
+ pub target: SuggestionTarget,
}
impl TypoSuggestion {
- crate fn from_res(candidate: Symbol, res: Res) -> TypoSuggestion {
- TypoSuggestion { candidate, res }
+ crate fn typo_from_res(candidate: Symbol, res: Res) -> TypoSuggestion {
+ Self { candidate, res, target: SuggestionTarget::SimilarlyNamed }
+ }
+ crate fn single_item_from_res(candidate: Symbol, res: Res) -> TypoSuggestion {
+ Self { candidate, res, target: SuggestionTarget::SingleItem }
}
}
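For illustration, a self-contained mock of how the two targets select different user-facing wording, mirroring the `match` added to `add_typo_suggestion` below (the real code formats the article and description of a `Res`):

    enum SuggestionTarget {
        SimilarlyNamed,
        SingleItem,
    }

    fn message(target: &SuggestionTarget, article: &str, descr: &str) -> String {
        match target {
            SuggestionTarget::SimilarlyNamed => {
                format!("{} {} with a similar name exists", article, descr)
            }
            SuggestionTarget::SingleItem => format!("maybe you meant this {}", descr),
        }
    }

    fn main() {
        assert_eq!(
            message(&SuggestionTarget::SimilarlyNamed, "a", "function"),
            "a function with a similar name exists"
        );
        assert_eq!(
            message(&SuggestionTarget::SingleItem, "a", "function"),
            "maybe you meant this function"
        );
    }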
if let Some(binding) = resolution.borrow().binding {
let res = binding.res();
if filter_fn(res) {
- names.push(TypoSuggestion::from_res(key.ident.name, res));
+ names.push(TypoSuggestion::typo_from_res(key.ident.name, res));
}
}
}
.get(&expn_id)
.into_iter()
.flatten()
- .map(|ident| TypoSuggestion::from_res(ident.name, res)),
+ .map(|ident| TypoSuggestion::typo_from_res(ident.name, res)),
);
}
}
suggestions.extend(
ext.helper_attrs
.iter()
- .map(|name| TypoSuggestion::from_res(*name, res)),
+ .map(|name| TypoSuggestion::typo_from_res(*name, res)),
);
}
}
if let MacroRulesScope::Binding(macro_rules_binding) = macro_rules_scope.get() {
let res = macro_rules_binding.binding.res();
if filter_fn(res) {
- suggestions
- .push(TypoSuggestion::from_res(macro_rules_binding.ident.name, res))
+ suggestions.push(TypoSuggestion::typo_from_res(
+ macro_rules_binding.ident.name,
+ res,
+ ))
}
}
}
suggestions.extend(
this.registered_attrs
.iter()
- .map(|ident| TypoSuggestion::from_res(ident.name, res)),
+ .map(|ident| TypoSuggestion::typo_from_res(ident.name, res)),
);
}
}
suggestions.extend(this.macro_use_prelude.iter().filter_map(
|(name, binding)| {
let res = binding.res();
- filter_fn(res).then_some(TypoSuggestion::from_res(*name, res))
+ filter_fn(res).then_some(TypoSuggestion::typo_from_res(*name, res))
},
));
}
suggestions.extend(
BUILTIN_ATTRIBUTES
.iter()
- .map(|(name, ..)| TypoSuggestion::from_res(*name, res)),
+ .map(|(name, ..)| TypoSuggestion::typo_from_res(*name, res)),
);
}
}
Scope::ExternPrelude => {
suggestions.extend(this.extern_prelude.iter().filter_map(|(ident, _)| {
let res = Res::Def(DefKind::Mod, DefId::local(CRATE_DEF_INDEX));
- filter_fn(res).then_some(TypoSuggestion::from_res(ident.name, res))
+ filter_fn(res).then_some(TypoSuggestion::typo_from_res(ident.name, res))
}));
}
Scope::ToolPrelude => {
suggestions.extend(
this.registered_tools
.iter()
- .map(|ident| TypoSuggestion::from_res(ident.name, res)),
+ .map(|ident| TypoSuggestion::typo_from_res(ident.name, res)),
);
}
Scope::StdLibPrelude => {
Scope::BuiltinTypes => {
suggestions.extend(PrimTy::ALL.iter().filter_map(|prim_ty| {
let res = Res::PrimTy(*prim_ty);
- filter_fn(res).then_some(TypoSuggestion::from_res(prim_ty.name(), res))
+ filter_fn(res).then_some(TypoSuggestion::typo_from_res(prim_ty.name(), res))
}))
}
}
// | ^
return false;
}
+ let prefix = match suggestion.target {
+ SuggestionTarget::SimilarlyNamed => "similarly named ",
+ SuggestionTarget::SingleItem => "",
+ };
+
err.span_label(
self.session.source_map().guess_head_span(def_span),
&format!(
- "similarly named {} `{}` defined here",
+ "{}{} `{}` defined here",
+ prefix,
suggestion.res.descr(),
suggestion.candidate.as_str(),
),
);
}
- let msg = format!(
- "{} {} with a similar name exists",
- suggestion.res.article(),
- suggestion.res.descr()
- );
+ let msg = match suggestion.target {
+ SuggestionTarget::SimilarlyNamed => format!(
+ "{} {} with a similar name exists",
+ suggestion.res.article(),
+ suggestion.res.descr()
+ ),
+ SuggestionTarget::SingleItem => {
+ format!("maybe you meant this {}", suggestion.res.descr())
+ }
+ };
err.span_suggestion(
span,
&msg,
}
_ => {}
}
+
+ // If the trait defines a single item that the Levenshtein check failed to match, suggest it
+ let suggestion = self.get_single_associated_item(&path, span, &source, is_expected);
+ self.r.add_typo_suggestion(&mut err, suggestion, ident_span);
}
if fallback {
// Fallback label.
(err, candidates)
}
+ fn get_single_associated_item(
+ &mut self,
+ path: &[Segment],
+ span: Span,
+ source: &PathSource<'_>,
+ filter_fn: &impl Fn(Res) -> bool,
+ ) -> Option<TypoSuggestion> {
+ if let crate::PathSource::TraitItem(_) = source {
+ let mod_path = &path[..path.len() - 1];
+ if let PathResult::Module(ModuleOrUniformRoot::Module(module)) =
+ self.resolve_path(mod_path, None, false, span, CrateLint::No)
+ {
+ let resolutions = self.r.resolutions(module).borrow();
+ let targets: Vec<_> =
+ resolutions
+ .iter()
+ .filter_map(|(key, resolution)| {
+ resolution.borrow().binding.map(|binding| binding.res()).and_then(
+ |res| if filter_fn(res) { Some((key, res)) } else { None },
+ )
+ })
+ .collect();
+ if targets.len() == 1 {
+ let target = targets[0];
+ return Some(TypoSuggestion::single_item_from_res(
+ target.0.ident.name,
+ target.1,
+ ));
+ }
+ }
+ }
+ None
+ }
+
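The new fallback only fires for trait-item paths when the similarity search found nothing and the resolved trait exports exactly one matching item. The kind of user code it improves:

    trait Answer {
        fn answer() -> u32;
    }

    struct It;

    impl Answer for It {
        fn answer() -> u32 { 42 }
    }

    fn main() {
        // A typo too far from `answer` for the Levenshtein check, such as
        // `<It as Answer>::compute()`, can now be reported with
        // "maybe you meant this associated function", since `Answer`
        // defines a single item.
        assert_eq!(<It as Answer>::answer(), 42);
    }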
/// Given `where <T as Bar>::Baz: String`, suggest `where T: Bar<Baz = String>`.
fn restrict_assoc_type_in_where_clause(
&mut self,
// Locals and type parameters
for (ident, &res) in &rib.bindings {
if filter_fn(res) {
- names.push(TypoSuggestion::from_res(ident.name, res));
+ names.push(TypoSuggestion::typo_from_res(ident.name, res));
}
}
// Items in scope
);
if filter_fn(crate_mod) {
- Some(TypoSuggestion::from_res(ident.name, crate_mod))
+ Some(TypoSuggestion::typo_from_res(
+ ident.name, crate_mod,
+ ))
} else {
None
}
}
// Add primitive types to the mix
if filter_fn(Res::PrimTy(PrimTy::Bool)) {
- names.extend(
- PrimTy::ALL.iter().map(|prim_ty| {
- TypoSuggestion::from_res(prim_ty.name(), Res::PrimTy(*prim_ty))
- }),
- )
+ names.extend(PrimTy::ALL.iter().map(|prim_ty| {
+ TypoSuggestion::typo_from_res(prim_ty.name(), Res::PrimTy(*prim_ty))
+ }))
}
} else {
// Search in module.
}
}
-fn parse_native_lib_kind(kind: &str, error_format: ErrorOutputType) -> NativeLibKind {
- match kind {
+fn parse_native_lib_kind(
+ matches: &getopts::Matches,
+ kind: &str,
+ error_format: ErrorOutputType,
+) -> (NativeLibKind, Option<bool>) {
+ let is_nightly = nightly_options::match_is_nightly_build(matches);
+ let enable_unstable = nightly_options::is_unstable_enabled(matches);
+
+ let (kind, modifiers) = match kind.split_once(':') {
+ None => (kind, None),
+ Some((kind, modifiers)) => (kind, Some(modifiers)),
+ };
+
+ let kind = match kind {
"dylib" => NativeLibKind::Dylib { as_needed: None },
"framework" => NativeLibKind::Framework { as_needed: None },
"static" => NativeLibKind::Static { bundle: None, whole_archive: None },
"library kind `static-nobundle` has been superseded by specifying \
`-bundle` on library kind `static`. Try `static:-bundle`",
);
+ if modifiers.is_some() {
+ early_error(
+ error_format,
+ "linking modifier can't be used with library kind `static-nobundle`",
+ )
+ }
+ if !is_nightly {
+ early_error(
+ error_format,
+ "library kind `static-nobundle` are currently unstable and only accepted on \
+ the nightly compiler",
+ );
+ }
NativeLibKind::Static { bundle: Some(false), whole_archive: None }
}
s => early_error(
error_format,
&format!("unknown library kind `{}`, expected one of dylib, framework, or static", s),
),
+ };
+ match modifiers {
+ None => (kind, None),
+ Some(modifiers) => {
+ if !is_nightly {
+ early_error(
+ error_format,
+ "linking modifiers are currently unstable and only accepted on \
+ the nightly compiler",
+ );
+ }
+ if !enable_unstable {
+ early_error(
+ error_format,
+ "linking modifiers are currently unstable, \
+ the `-Z unstable-options` flag must also be passed to use them",
+ )
+ }
+ parse_native_lib_modifiers(kind, modifiers, error_format)
+ }
}
}
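Taken together, `-l` now accepts `[KIND[:MODIFIERS]=]NAME`, with the kind and its modifiers split off before the existing modifier parser runs. A standalone sketch of just the splitting step, without validation or diagnostics:

    /// Split a `-l` value of the form `[kind[:modifiers]=]name` into parts.
    fn split_lib_spec(s: &str) -> (Option<&str>, Option<&str>, &str) {
        match s.split_once('=') {
            None => (None, None, s),
            Some((kind_part, name)) => match kind_part.split_once(':') {
                None => (Some(kind_part), None, name),
                Some((kind, modifiers)) => (Some(kind), Some(modifiers), name),
            },
        }
    }

    fn main() {
        assert_eq!(split_lib_spec("m"), (None, None, "m"));
        assert_eq!(split_lib_spec("static=m"), (Some("static"), None, "m"));
        assert_eq!(
            split_lib_spec("static:-bundle=m"),
            (Some("static"), Some("-bundle"), "m")
        );
    }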
fn parse_native_lib_modifiers(
- is_nightly: bool,
mut kind: NativeLibKind,
modifiers: &str,
error_format: ErrorOutputType,
),
};
- if !is_nightly {
- early_error(
- error_format,
- "linking modifiers are currently unstable and only accepted on \
- the nightly compiler",
- );
- }
-
match (modifier, &mut kind) {
("bundle", NativeLibKind::Static { bundle, .. }) => {
*bundle = Some(value);
}
fn parse_libs(matches: &getopts::Matches, error_format: ErrorOutputType) -> Vec<NativeLib> {
- let is_nightly = nightly_options::match_is_nightly_build(matches);
matches
.opt_strs("l")
.into_iter()
let (name, kind, verbatim) = match s.split_once('=') {
None => (s, NativeLibKind::Unspecified, None),
Some((kind, name)) => {
- let (kind, verbatim) = match kind.split_once(':') {
- None => (parse_native_lib_kind(kind, error_format), None),
- Some((kind, modifiers)) => {
- let kind = parse_native_lib_kind(kind, error_format);
- parse_native_lib_modifiers(is_nightly, kind, modifiers, error_format)
- }
- };
+ let (kind, verbatim) = parse_native_lib_kind(matches, kind, error_format);
(name.to_string(), kind, verbatim)
}
};
{
let old_count = self.err_count();
let result = f();
- let errors = self.err_count() - old_count;
- if errors == 0 { Ok(result) } else { Err(ErrorReported) }
+ if self.err_count() == old_count { Ok(result) } else { Err(ErrorReported) }
}
pub fn span_warn<S: Into<MultiSpan>>(&self, sp: S, msg: &str) {
self.diagnostic().span_warn(sp, msg)
// Make sure the programmer specified correct number of fields.
if kind_name == "union" {
if ast_fields.len() != 1 {
- tcx.sess.span_err(span, "union expressions should have exactly one field");
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0784,
+ "union expressions should have exactly one field",
+ )
+ .emit();
}
} else if check_completeness && !error_happened && !remaining_fields.is_empty() {
let no_accessible_remaining_fields = remaining_fields
}
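The change is diagnostic plumbing only: the rejected form now carries the named error code E0784 instead of a bare span error. For reference, the shape it rejects:

    union U {
        a: u32,
        b: f32,
    }

    fn main() {
        // error[E0784]: union expressions should have exactly one field
        // let u = U { a: 1, b: 2.0 };
        let u = U { a: 1 };
        let _ = unsafe { u.a };
    }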
/// If this `DefId` is a "primary tables entry", returns
-/// `Some((body_id, header, decl))` with information about
-/// its body-id, fn-header and fn-decl (if any). Otherwise,
-/// returns `None`.
+/// `Some((body_id, body_ty, fn_sig))`. Otherwise, returns `None`.
///
/// If this function returns `Some`, then `typeck_results(def_id)` will
/// succeed; if it returns `None`, then `typeck_results(def_id)` may or
fn primary_body_of(
tcx: TyCtxt<'_>,
id: hir::HirId,
-) -> Option<(hir::BodyId, Option<&hir::Ty<'_>>, Option<&hir::FnHeader>, Option<&hir::FnDecl<'_>>)> {
+) -> Option<(hir::BodyId, Option<&hir::Ty<'_>>, Option<&hir::FnSig<'_>>)> {
match tcx.hir().get(id) {
Node::Item(item) => match item.kind {
hir::ItemKind::Const(ref ty, body) | hir::ItemKind::Static(ref ty, _, body) => {
- Some((body, Some(ty), None, None))
- }
- hir::ItemKind::Fn(ref sig, .., body) => {
- Some((body, None, Some(&sig.header), Some(&sig.decl)))
+ Some((body, Some(ty), None))
}
+ hir::ItemKind::Fn(ref sig, .., body) => Some((body, None, Some(&sig))),
_ => None,
},
Node::TraitItem(item) => match item.kind {
- hir::TraitItemKind::Const(ref ty, Some(body)) => Some((body, Some(ty), None, None)),
+ hir::TraitItemKind::Const(ref ty, Some(body)) => Some((body, Some(ty), None)),
hir::TraitItemKind::Fn(ref sig, hir::TraitFn::Provided(body)) => {
- Some((body, None, Some(&sig.header), Some(&sig.decl)))
+ Some((body, None, Some(&sig)))
}
_ => None,
},
Node::ImplItem(item) => match item.kind {
- hir::ImplItemKind::Const(ref ty, body) => Some((body, Some(ty), None, None)),
- hir::ImplItemKind::Fn(ref sig, body) => {
- Some((body, None, Some(&sig.header), Some(&sig.decl)))
- }
+ hir::ImplItemKind::Const(ref ty, body) => Some((body, Some(ty), None)),
+ hir::ImplItemKind::Fn(ref sig, body) => Some((body, None, Some(&sig))),
_ => None,
},
- Node::AnonConst(constant) => Some((constant.body, None, None, None)),
+ Node::AnonConst(constant) => Some((constant.body, None, None)),
_ => None,
}
}
let span = tcx.hir().span(id);
// Figure out what primary body this item has.
- let (body_id, body_ty, fn_header, fn_decl) = primary_body_of(tcx, id).unwrap_or_else(|| {
+ let (body_id, body_ty, fn_sig) = primary_body_of(tcx, id).unwrap_or_else(|| {
span_bug!(span, "can't type-check body of {:?}", def_id);
});
let body = tcx.hir().body(body_id);
let typeck_results = Inherited::build(tcx, def_id).enter(|inh| {
let param_env = tcx.param_env(def_id);
- let fcx = if let (Some(header), Some(decl)) = (fn_header, fn_decl) {
+ let fcx = if let Some(hir::FnSig { header, decl, .. }) = fn_sig {
let fn_sig = if crate::collect::get_infer_ret_ty(&decl.output).is_some() {
let fcx = FnCtxt::new(&inh, param_env, body.value.hir_id);
<dyn AstConv<'_>>::ty_of_fn(
fcx.select_all_obligations_or_error();
- if fn_decl.is_some() {
+ if fn_sig.is_some() {
fcx.regionck_fn(id, body);
} else {
fcx.regionck_expr(body);
/// # Examples
///
/// ```
- /// #![feature(shrink_to)]
/// use std::collections::BinaryHeap;
/// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
///
/// assert!(heap.capacity() >= 10);
/// ```
#[inline]
- #[unstable(feature = "shrink_to", reason = "new API", issue = "56431")]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
pub fn shrink_to(&mut self, min_capacity: usize) {
self.data.shrink_to(min_capacity)
}
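With the gate removed (stable as of 1.56), callers no longer need the `#![feature(shrink_to)]` line the doc examples used to carry; for instance with a `Vec`:

    fn main() {
        let mut v = Vec::with_capacity(100);
        v.extend([1, 2, 3]);
        v.shrink_to(10);
        assert!(v.capacity() >= 10);
        // Requests below the current length are clamped to it.
        v.shrink_to(0);
        assert!(v.capacity() >= 3);
    }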
/// The error type for `try_reserve` methods.
#[derive(Clone, PartialEq, Eq, Debug)]
#[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
-pub enum TryReserveError {
+pub struct TryReserveError {
+ kind: TryReserveErrorKind,
+}
+
+impl TryReserveError {
+ /// Details about the allocation that caused the error
+ #[inline]
+ #[unstable(
+ feature = "try_reserve_kind",
+ reason = "Uncertain how much info should be exposed",
+ issue = "48043"
+ )]
+ pub fn kind(&self) -> TryReserveErrorKind {
+ self.kind.clone()
+ }
+}
+
+/// Details of the allocation that caused a `TryReserveError`
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[unstable(
+ feature = "try_reserve_kind",
+ reason = "Uncertain how much info should be exposed",
+ issue = "48043"
+)]
+pub enum TryReserveErrorKind {
/// Error due to the computed capacity exceeding the collection's maximum
/// (usually `isize::MAX` bytes).
CapacityOverflow,
},
}
+#[unstable(
+ feature = "try_reserve_kind",
+ reason = "Uncertain how much info should be exposed",
+ issue = "48043"
+)]
+impl From<TryReserveErrorKind> for TryReserveError {
+ fn from(kind: TryReserveErrorKind) -> Self {
+ Self { kind }
+ }
+}
+
#[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
impl From<LayoutError> for TryReserveError {
- /// Always evaluates to [`TryReserveError::CapacityOverflow`].
+ /// Always evaluates to [`TryReserveErrorKind::CapacityOverflow`].
#[inline]
fn from(_: LayoutError) -> Self {
- TryReserveError::CapacityOverflow
+ TryReserveErrorKind::CapacityOverflow.into()
}
}
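Downstream code moves from matching the error itself to matching `kind()`. A sketch against the API at this point in the tree, when both gates were still nightly-only (`try_reserve` itself stabilized later):

    #![feature(try_reserve, try_reserve_kind)]

    use std::collections::TryReserveErrorKind;

    fn grow(v: &mut Vec<u8>, additional: usize) -> Result<(), &'static str> {
        v.try_reserve(additional).map_err(|e| match e.kind() {
            TryReserveErrorKind::CapacityOverflow => "capacity computation overflowed",
            TryReserveErrorKind::AllocError { .. } => "allocator refused the request",
        })
    }

    fn main() {
        let mut v = Vec::new();
        assert!(grow(&mut v, 16).is_ok());
        assert!(grow(&mut v, usize::MAX).is_err());
    }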
fmt: &mut core::fmt::Formatter<'_>,
) -> core::result::Result<(), core::fmt::Error> {
fmt.write_str("memory allocation failed")?;
- let reason = match &self {
- TryReserveError::CapacityOverflow => {
+ let reason = match self.kind {
+ TryReserveErrorKind::CapacityOverflow => {
" because the computed capacity exceeded the collection's maximum"
}
- TryReserveError::AllocError { .. } => " because the memory allocator returned a error",
+ TryReserveErrorKind::AllocError { .. } => {
+ " because the memory allocator returned a error"
+ }
};
fmt.write_str(reason)
}
use crate::alloc::{Allocator, Global};
use crate::collections::TryReserveError;
+use crate::collections::TryReserveErrorKind;
use crate::raw_vec::RawVec;
use crate::vec::Vec;
let new_cap = used_cap
.checked_add(additional)
.and_then(|needed_cap| needed_cap.checked_next_power_of_two())
- .ok_or(TryReserveError::CapacityOverflow)?;
+ .ok_or(TryReserveErrorKind::CapacityOverflow)?;
if new_cap > old_cap {
self.buf.try_reserve_exact(used_cap, new_cap - used_cap)?;
/// # Examples
///
/// ```
- /// #![feature(shrink_to)]
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::with_capacity(15);
/// buf.shrink_to(0);
/// assert!(buf.capacity() >= 4);
/// ```
- #[unstable(feature = "shrink_to", reason = "new API", issue = "56431")]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
pub fn shrink_to(&mut self, min_capacity: usize) {
let min_capacity = cmp::min(min_capacity, self.capacity());
// We don't have to worry about an overflow as neither `self.len()` nor `self.capacity()`
use crate::alloc::handle_alloc_error;
use crate::alloc::{Allocator, Global, Layout};
use crate::boxed::Box;
-use crate::collections::TryReserveError::{self, *};
+use crate::collections::TryReserveError;
+use crate::collections::TryReserveErrorKind::*;
#[cfg(test)]
mod tests;
if mem::size_of::<T>() == 0 {
// Since we return a capacity of `usize::MAX` when `elem_size` is
// 0, getting to here necessarily means the `RawVec` is overfull.
- return Err(CapacityOverflow);
+ return Err(CapacityOverflow.into());
}
// Nothing we can really do about these checks, sadly.
if mem::size_of::<T>() == 0 {
// Since we return a capacity of `usize::MAX` when the type size is
// 0, getting to here necessarily means the `RawVec` is overfull.
- return Err(CapacityOverflow);
+ return Err(CapacityOverflow.into());
}
let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
let ptr = unsafe {
let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
- self.alloc.shrink(ptr, layout, new_layout).map_err(|_| TryReserveError::AllocError {
- layout: new_layout,
- non_exhaustive: (),
- })?
+ self.alloc
+ .shrink(ptr, layout, new_layout)
+ .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
};
self.set_ptr(ptr);
Ok(())
alloc.allocate(new_layout)
};
- memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })
+ memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
}
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
#[cfg(not(no_global_oom_handling))]
#[inline]
fn handle_reserve(result: Result<(), TryReserveError>) {
- match result {
+ match result.map_err(|e| e.kind()) {
Err(CapacityOverflow) => capacity_overflow(),
Err(AllocError { layout, .. }) => handle_alloc_error(layout),
Ok(()) => { /* yay */ }
#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
if usize::BITS < 64 && alloc_size > isize::MAX as usize {
- Err(CapacityOverflow)
+ Err(CapacityOverflow.into())
} else {
Ok(())
}
/// # Examples
///
/// ```
- /// #![feature(shrink_to)]
/// let mut s = String::from("foo");
///
/// s.reserve(100);
/// ```
#[cfg(not(no_global_oom_handling))]
#[inline]
- #[unstable(feature = "shrink_to", reason = "new API", issue = "56431")]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
pub fn shrink_to(&mut self, min_capacity: usize) {
self.vec.shrink_to(min_capacity)
}
/// # Examples
///
/// ```
- /// #![feature(shrink_to)]
/// let mut vec = Vec::with_capacity(10);
/// vec.extend([1, 2, 3]);
/// assert_eq!(vec.capacity(), 10);
/// assert!(vec.capacity() >= 3);
/// ```
#[cfg(not(no_global_oom_handling))]
- #[unstable(feature = "shrink_to", reason = "new API", issue = "56431")]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
pub fn shrink_to(&mut self, min_capacity: usize) {
if self.capacity() > min_capacity {
self.buf.shrink_to_fit(cmp::max(self.len, min_capacity));
#![feature(pattern)]
#![feature(trusted_len)]
#![feature(try_reserve)]
+#![feature(try_reserve_kind)]
#![feature(unboxed_closures)]
#![feature(associated_type_bounds)]
#![feature(binary_heap_into_iter_sorted)]
use std::borrow::Cow;
use std::cell::Cell;
-use std::collections::TryReserveError::*;
+use std::collections::TryReserveErrorKind::*;
use std::ops::Bound;
use std::ops::Bound::*;
use std::ops::RangeBounds;
let mut empty_string: String = String::new();
// Check isize::MAX doesn't count as an overflow
- if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP) {
+ if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
// Play it again, frank! (just to be sure)
- if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP) {
+ if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
// Check isize::MAX + 1 does count as overflow
- if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP + 1) {
+ if let Err(CapacityOverflow) =
+ empty_string.try_reserve(MAX_CAP + 1).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an overflow!")
}
// Check usize::MAX does count as overflow
- if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_USIZE) {
+ if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_USIZE).map_err(|e| e.kind())
+ {
} else {
panic!("usize::MAX should trigger an overflow!")
}
} else {
// Check isize::MAX + 1 is an OOM
- if let Err(AllocError { .. }) = empty_string.try_reserve(MAX_CAP + 1) {
+ if let Err(AllocError { .. }) =
+ empty_string.try_reserve(MAX_CAP + 1).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
// Check usize::MAX is an OOM
- if let Err(AllocError { .. }) = empty_string.try_reserve(MAX_USIZE) {
+ if let Err(AllocError { .. }) =
+ empty_string.try_reserve(MAX_USIZE).map_err(|e| e.kind())
+ {
} else {
panic!("usize::MAX should trigger an OOM!")
}
// Same basic idea, but with non-zero len
let mut ten_bytes: String = String::from("0123456789");
- if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) {
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) {
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
- if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9) {
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
- if let Err(AllocError { .. }) = ten_bytes.try_reserve(MAX_CAP - 9) {
+ if let Err(AllocError { .. }) = ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
// Should always overflow in the add-to-len
- if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE) {
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()) {
} else {
panic!("usize::MAX should trigger an overflow!")
}
{
let mut empty_string: String = String::new();
- if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP) {
+ if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP) {
+ if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
- if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP + 1) {
+ if let Err(CapacityOverflow) =
+ empty_string.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an overflow!")
}
- if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_USIZE) {
+ if let Err(CapacityOverflow) =
+ empty_string.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind())
+ {
} else {
panic!("usize::MAX should trigger an overflow!")
}
} else {
- if let Err(AllocError { .. }) = empty_string.try_reserve_exact(MAX_CAP + 1) {
+ if let Err(AllocError { .. }) =
+ empty_string.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
- if let Err(AllocError { .. }) = empty_string.try_reserve_exact(MAX_USIZE) {
+ if let Err(AllocError { .. }) =
+ empty_string.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind())
+ {
} else {
panic!("usize::MAX should trigger an OOM!")
}
{
let mut ten_bytes: String = String::from("0123456789");
- if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) {
+ if let Err(CapacityOverflow) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) {
+ if let Err(CapacityOverflow) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
- if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 9) {
+ if let Err(CapacityOverflow) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
- if let Err(AllocError { .. }) = ten_bytes.try_reserve_exact(MAX_CAP - 9) {
+ if let Err(AllocError { .. }) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
- if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE) {
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind())
+ {
} else {
panic!("usize::MAX should trigger an overflow!")
}
use std::borrow::Cow;
use std::cell::Cell;
-use std::collections::TryReserveError::*;
+use std::collections::TryReserveErrorKind::*;
use std::fmt::Debug;
use std::iter::InPlaceIterable;
use std::mem::{size_of, swap};
let mut empty_bytes: Vec<u8> = Vec::new();
// Check isize::MAX doesn't count as an overflow
- if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) {
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
// Play it again, frank! (just to be sure)
- if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) {
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
// Check isize::MAX + 1 does count as overflow
- if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP + 1) {
+ if let Err(CapacityOverflow) =
+ empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an overflow!")
}
// Check usize::MAX does count as overflow
- if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) {
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind())
+ {
} else {
panic!("usize::MAX should trigger an overflow!")
}
} else {
// Check isize::MAX + 1 is an OOM
- if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_CAP + 1) {
+ if let Err(AllocError { .. }) =
+ empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
// Check usize::MAX is an OOM
- if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE) {
+ if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind())
+ {
} else {
panic!("usize::MAX should trigger an OOM!")
}
// Same basic idea, but with non-zero len
let mut ten_bytes: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
- if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) {
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) {
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
- if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9) {
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
- if let Err(AllocError { .. }) = ten_bytes.try_reserve(MAX_CAP - 9) {
+ if let Err(AllocError { .. }) = ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
// Should always overflow in the add-to-len
- if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE) {
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()) {
} else {
panic!("usize::MAX should trigger an overflow!")
}
// Same basic idea, but with interesting type size
let mut ten_u32s: Vec<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
- if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10) {
+ if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10) {
+ if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
- if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 9) {
+ if let Err(CapacityOverflow) =
+ ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
- if let Err(AllocError { .. }) = ten_u32s.try_reserve(MAX_CAP / 4 - 9) {
+ if let Err(AllocError { .. }) =
+ ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
// Should fail in the mul-by-size
- if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_USIZE - 20) {
+ if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_USIZE - 20).map_err(|e| e.kind()) {
} else {
panic!("usize::MAX should trigger an overflow!");
}
{
let mut empty_bytes: Vec<u8> = Vec::new();
- if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) {
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) {
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
- if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP + 1) {
+ if let Err(CapacityOverflow) =
+ empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an overflow!")
}
- if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_USIZE) {
+ if let Err(CapacityOverflow) =
+ empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind())
+ {
} else {
panic!("usize::MAX should trigger an overflow!")
}
} else {
- if let Err(AllocError { .. }) = empty_bytes.try_reserve_exact(MAX_CAP + 1) {
+ if let Err(AllocError { .. }) =
+ empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
- if let Err(AllocError { .. }) = empty_bytes.try_reserve_exact(MAX_USIZE) {
+ if let Err(AllocError { .. }) =
+ empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind())
+ {
} else {
panic!("usize::MAX should trigger an OOM!")
}
{
let mut ten_bytes: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
- if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) {
+ if let Err(CapacityOverflow) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) {
+ if let Err(CapacityOverflow) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
- if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 9) {
+ if let Err(CapacityOverflow) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
- if let Err(AllocError { .. }) = ten_bytes.try_reserve_exact(MAX_CAP - 9) {
+ if let Err(AllocError { .. }) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
- if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE) {
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind())
+ {
} else {
panic!("usize::MAX should trigger an overflow!")
}
{
let mut ten_u32s: Vec<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
- if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10) {
+ if let Err(CapacityOverflow) =
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10) {
+ if let Err(CapacityOverflow) =
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
- if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9) {
+ if let Err(CapacityOverflow) =
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
- if let Err(AllocError { .. }) = ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9) {
+ if let Err(AllocError { .. }) =
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
- if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_USIZE - 20) {
+ if let Err(CapacityOverflow) =
+ ten_u32s.try_reserve_exact(MAX_USIZE - 20).map_err(|e| e.kind())
+ {
} else {
panic!("usize::MAX should trigger an overflow!")
}
-use std::collections::TryReserveError::*;
+use std::collections::TryReserveErrorKind::*;
use std::collections::{vec_deque::Drain, VecDeque};
use std::fmt::Debug;
use std::mem::size_of;
let mut empty_bytes: VecDeque<u8> = VecDeque::new();
// Check isize::MAX doesn't count as an overflow
- if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) {
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
// Play it again, frank! (just to be sure)
- if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) {
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
// Check isize::MAX + 1 does count as overflow
- if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP + 1) {
+ if let Err(CapacityOverflow) =
+ empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an overflow!")
}
// Check usize::MAX does count as overflow
- if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) {
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind())
+ {
} else {
panic!("usize::MAX should trigger an overflow!")
}
// VecDeque starts with capacity 7, always adds 1 to the capacity
// and also rounds the number to next power of 2 so this is the
// furthest we can go without triggering CapacityOverflow
- if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_CAP) {
+ if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
// Same basic idea, but with non-zero len
let mut ten_bytes: VecDeque<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
- if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) {
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) {
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
- if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9) {
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
- if let Err(AllocError { .. }) = ten_bytes.try_reserve(MAX_CAP - 9) {
+ if let Err(AllocError { .. }) = ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
// Should always overflow in the add-to-len
- if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE) {
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()) {
} else {
panic!("usize::MAX should trigger an overflow!")
}
// Same basic idea, but with interesting type size
let mut ten_u32s: VecDeque<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
- if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10) {
+ if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10) {
+ if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
- if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 9) {
+ if let Err(CapacityOverflow) =
+ ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
- if let Err(AllocError { .. }) = ten_u32s.try_reserve(MAX_CAP / 4 - 9) {
+ if let Err(AllocError { .. }) =
+ ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
// Should fail in the mul-by-size
- if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_USIZE - 20) {
+ if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_USIZE - 20).map_err(|e| e.kind()) {
} else {
panic!("usize::MAX should trigger an overflow!");
}
{
let mut empty_bytes: VecDeque<u8> = VecDeque::new();
- if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) {
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) {
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
- if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP + 1) {
+ if let Err(CapacityOverflow) =
+ empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an overflow!")
}
- if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_USIZE) {
+ if let Err(CapacityOverflow) =
+ empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind())
+ {
} else {
panic!("usize::MAX should trigger an overflow!")
}
// VecDeque starts with capacity 7, always adds 1 to the capacity
// and also rounds the number to next power of 2 so this is the
// furthest we can go without triggering CapacityOverflow
- if let Err(AllocError { .. }) = empty_bytes.try_reserve_exact(MAX_CAP) {
+ if let Err(AllocError { .. }) =
+ empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
{
let mut ten_bytes: VecDeque<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
- if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) {
+ if let Err(CapacityOverflow) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) {
+ if let Err(CapacityOverflow) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
- if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 9) {
+ if let Err(CapacityOverflow) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
- if let Err(AllocError { .. }) = ten_bytes.try_reserve_exact(MAX_CAP - 9) {
+ if let Err(AllocError { .. }) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
- if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE) {
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind())
+ {
} else {
panic!("usize::MAX should trigger an overflow!")
}
{
let mut ten_u32s: VecDeque<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
- if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10) {
+ if let Err(CapacityOverflow) =
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10) {
+ if let Err(CapacityOverflow) =
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10).map_err(|e| e.kind())
+ {
panic!("isize::MAX shouldn't trigger an overflow!");
}
if guards_against_isize {
- if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9) {
+ if let Err(CapacityOverflow) =
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an overflow!");
}
} else {
- if let Err(AllocError { .. }) = ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9) {
+ if let Err(AllocError { .. }) =
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind())
+ {
} else {
panic!("isize::MAX + 1 should trigger an OOM!")
}
}
- if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_USIZE - 20) {
+ if let Err(CapacityOverflow) =
+ ten_u32s.try_reserve_exact(MAX_USIZE - 20).map_err(|e| e.kind())
+ {
} else {
panic!("usize::MAX should trigger an overflow!")
}
self.map(f).is_sorted()
}
- /// See [TrustedRandomAccess]
+ /// See [TrustedRandomAccess][super::super::TrustedRandomAccess]
// The unusual name is to avoid name collisions in method resolution
// see #76479.
#[inline]
self as _
}
- /// Decompose a (possibly wide) pointer into is address and metadata components.
+ /// Decompose a (possibly wide) pointer into its address and metadata components.
///
/// The pointer can be later reconstructed with [`from_raw_parts`].
#[unstable(feature = "ptr_metadata", issue = "81513")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
pub const unsafe fn read<T>(src: *const T) -> T {
+ // We call the intrinsic directly to avoid a function call in the
+ // generated code, since `intrinsics::copy_nonoverlapping` is a wrapper.
+ extern "rust-intrinsic" {
+ #[rustc_const_unstable(feature = "const_intrinsic_copy", issue = "80697")]
+ fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+ }
+
let mut tmp = MaybeUninit::<T>::uninit();
// SAFETY: the caller must guarantee that `src` is valid for reads.
// `src` cannot overlap `tmp` because `tmp` was just allocated on
self as _
}
- /// Decompose a (possibly wide) pointer into is address and metadata components.
+ /// Decompose a (possibly wide) pointer into its address and metadata components.
///
/// The pointer can be later reconstructed with [`from_raw_parts_mut`].
#[unstable(feature = "ptr_metadata", issue = "81513")]
#![feature(staged_api)]
#![feature(rustc_attrs)]
#![feature(asm)]
+#![feature(c_unwind)]
#[cfg(target_os = "android")]
mod android;
// "Leak" the payload and shim to the relevant abort on the platform in question.
#[rustc_std_internal_symbol]
-pub unsafe extern "C" fn __rust_start_panic(_payload: *mut &mut dyn BoxMeUp) -> u32 {
+pub unsafe extern "C-unwind" fn __rust_start_panic(_payload: *mut &mut dyn BoxMeUp) -> u32 {
// Android has the ability to attach a message as part of the abort.
#[cfg(target_os = "android")]
android::android_set_abort_message(_payload);
macro_rules! diagnostic_child_methods {
($spanned:ident, $regular:ident, $level:expr) => {
- /// Adds a new child diagnostic message to `self` with the level
- /// identified by this method's name with the given `spans` and
- /// `message`.
#[unstable(feature = "proc_macro_diagnostic", issue = "54140")]
+ #[doc = concat!("Adds a new child diagnostics message to `self` with the [`",
+ stringify!($level), "`] level, and the given `spans` and `message`.")]
pub fn $spanned<S, T>(mut self, spans: S, message: T) -> Diagnostic
where
S: MultiSpan,
self
}
- /// Adds a new child diagnostic message to `self` with the level
- /// identified by this method's name with the given `message`.
#[unstable(feature = "proc_macro_diagnostic", issue = "54140")]
+ #[doc = concat!("Adds a new child diagnostic message to `self` with the [`",
+ stringify!($level), "`] level, and the given `message`.")]
pub fn $regular<T: Into<String>>(mut self, message: T) -> Diagnostic {
self.children.push(Diagnostic::new($level, message));
self
use crate::borrow::Borrow;
use crate::cell::Cell;
use crate::collections::TryReserveError;
+use crate::collections::TryReserveErrorKind;
use crate::fmt::{self, Debug};
#[allow(deprecated)]
use crate::hash::{BuildHasher, Hash, Hasher, SipHasher13};
/// # Examples
///
/// ```
- /// #![feature(shrink_to)]
/// use std::collections::HashMap;
///
/// let mut map: HashMap<i32, i32> = HashMap::with_capacity(100);
/// assert!(map.capacity() >= 2);
/// ```
#[inline]
- #[unstable(feature = "shrink_to", reason = "new API", issue = "56431")]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
pub fn shrink_to(&mut self, min_capacity: usize) {
self.base.shrink_to(min_capacity);
}
#[inline]
pub(super) fn map_try_reserve_error(err: hashbrown::TryReserveError) -> TryReserveError {
match err {
- hashbrown::TryReserveError::CapacityOverflow => TryReserveError::CapacityOverflow,
+ hashbrown::TryReserveError::CapacityOverflow => {
+ TryReserveErrorKind::CapacityOverflow.into()
+ }
hashbrown::TryReserveError::AllocError { layout } => {
- TryReserveError::AllocError { layout, non_exhaustive: () }
+ TryReserveErrorKind::AllocError { layout, non_exhaustive: () }.into()
}
}
}
use super::RandomState;
use crate::cell::RefCell;
use rand::{thread_rng, Rng};
-use realstd::collections::TryReserveError::*;
+use realstd::collections::TryReserveErrorKind::*;
// https://github.com/rust-lang/rust/issues/62301
fn _assert_hashmap_is_unwind_safe() {
const MAX_USIZE: usize = usize::MAX;
- if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) {
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()) {
} else {
panic!("usize::MAX should trigger an overflow!");
}
- if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE / 8) {
+ if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE / 8).map_err(|e| e.kind()) {
} else {
panic!("usize::MAX / 8 should trigger an OOM!")
}
/// # Examples
///
/// ```
- /// #![feature(shrink_to)]
/// use std::collections::HashSet;
///
/// let mut set = HashSet::with_capacity(100);
/// assert!(set.capacity() >= 2);
/// ```
#[inline]
- #[unstable(feature = "shrink_to", reason = "new API", issue = "56431")]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
pub fn shrink_to(&mut self, min_capacity: usize) {
self.base.shrink_to(min_capacity)
}
#[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
pub use alloc_crate::collections::TryReserveError;
+#[unstable(
+ feature = "try_reserve_kind",
+ reason = "Uncertain how much info should be exposed",
+ issue = "48043"
+)]
+pub use alloc_crate::collections::TryReserveErrorKind;
mod hash;
/// # Examples
///
/// ```
- /// #![feature(shrink_to)]
/// use std::ffi::OsString;
///
/// let mut s = OsString::from("foo");
/// assert!(s.capacity() >= 3);
/// ```
#[inline]
- #[unstable(feature = "shrink_to", reason = "new API", issue = "56431")]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
pub fn shrink_to(&mut self, min_capacity: usize) {
self.inner.shrink_to(min_capacity)
}
///
/// The iterator will yield instances of [`io::Result`]`<`[`DirEntry`]`>`.
/// New errors may be encountered after an iterator is initially constructed.
+/// Entries for the current and parent directories (typically `.` and `..`) are
+/// skipped.
///
/// # Platform-specific behavior
///
/// the documentation of [`empty()`] for more details.
#[stable(feature = "rust1", since = "1.0.0")]
#[non_exhaustive]
+#[derive(Copy, Clone, Default)]
pub struct Empty;
/// Constructs a new handle to an empty reader.
/// see the documentation of [`sink()`] for more details.
#[stable(feature = "rust1", since = "1.0.0")]
#[non_exhaustive]
+#[derive(Copy, Clone, Default)]
pub struct Sink;
/// Creates an instance of a writer which will successfully consume all data.
#![feature(ptr_internals)]
#![feature(rustc_attrs)]
#![feature(rustc_private)]
-#![feature(shrink_to)]
#![feature(slice_concat_ext)]
#![feature(slice_internals)]
#![feature(slice_ptr_get)]
#![feature(trace_macros)]
#![feature(try_blocks)]
#![feature(try_reserve)]
+#![feature(try_reserve_kind)]
#![feature(unboxed_closures)]
#![feature(unsafe_cell_raw_get)]
#![feature(unwrap_infallible)]
/// Invokes [`shrink_to`] on the underlying instance of [`OsString`].
///
/// [`shrink_to`]: OsString::shrink_to
- #[unstable(feature = "shrink_to", issue = "56431")]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
#[inline]
pub fn shrink_to(&mut self, min_capacity: usize) {
self.inner.shrink_to(min_capacity)
///
/// In Unix terms the return value is the **exit status**: the value passed to `exit`, if the
/// process finished by calling `exit`. Note that on Unix the exit status is truncated to 8
- /// bits, and that values that didn't come from a program's call to `exit` may be invented the
+ /// bits, and that values that didn't come from a program's call to `exit` may be invented by the
/// runtime system (often, for example, 255, 254, 127 or 126).
///
/// On Unix, this will return `None` if the process was terminated by a signal.
#[cfg(target_os = "haiku")]
pub fn current_exe() -> io::Result<PathBuf> {
- // Use Haiku's image info functions
- #[repr(C)]
- struct image_info {
- id: i32,
- type_: i32,
- sequence: i32,
- init_order: i32,
- init_routine: *mut libc::c_void, // function pointer
- term_routine: *mut libc::c_void, // function pointer
- device: libc::dev_t,
- node: libc::ino_t,
- name: [libc::c_char; 1024], // MAXPATHLEN
- text: *mut libc::c_void,
- data: *mut libc::c_void,
- text_size: i32,
- data_size: i32,
- api_version: i32,
- abi: i32,
- }
-
unsafe {
- extern "C" {
- fn _get_next_image_info(
- team_id: i32,
- cookie: *mut i32,
- info: *mut image_info,
- size: i32,
- ) -> i32;
- }
-
- let mut info: image_info = mem::zeroed();
+ let mut info: mem::MaybeUninit<libc::image_info> = mem::MaybeUninit::uninit();
let mut cookie: i32 = 0;
// the executable can be found at team id 0
- let result =
- _get_next_image_info(0, &mut cookie, &mut info, mem::size_of::<image_info>() as i32);
+ let result = libc::_get_next_image_info(
+ 0,
+ &mut cookie,
+ info.as_mut_ptr(),
+ mem::size_of::<libc::image_info>(),
+ );
if result != 0 {
use crate::io::ErrorKind;
Err(io::Error::new_const(ErrorKind::Uncategorized, &"Error getting executable path"))
} else {
- let name = CStr::from_ptr(info.name.as_ptr()).to_bytes();
+ let name = CStr::from_ptr((*info.as_ptr()).name.as_ptr()).to_bytes();
Ok(PathBuf::from(OsStr::from_bytes(name)))
}
}
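
The pattern adopted above, in isolation: let the C callee initialize the buffer through `MaybeUninit`, and only read it once the call reports success. `Info` and `fill_info` are hypothetical stand-ins, not a real API.

use std::mem::MaybeUninit;

#[repr(C)]
struct Info {
    value: i32,
}

extern "C" {
    // Hypothetical C function: fills `*out` and returns 0 on success.
    fn fill_info(out: *mut Info) -> i32;
}

fn get_info() -> Option<Info> {
    let mut info = MaybeUninit::<Info>::uninit();
    // SAFETY: `fill_info` fully initializes `*out` whenever it returns 0,
    // so `assume_init` only runs on initialized memory.
    unsafe {
        if fill_info(info.as_mut_ptr()) == 0 { Some(info.assume_init()) } else { None }
    }
}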
- `llvm-libunwind` now accepts `in-tree` (formerly true), `system` or `no` (formerly false) [#77703](https://github.com/rust-lang/rust/pull/77703)
- The options `infodir`, `localstatedir`, and `gpg-password-file` are no longer allowed in config.toml. Previously, they were ignored without warning. Note that `infodir` and `localstatedir` are still accepted by `./configure`, with a warning. [#82451](https://github.com/rust-lang/rust/pull/82451)
+- Add options for enabling overflow checks, one for std (`overflow-checks-std`) and one for everything else (`overflow-checks`). Both default to false.
### Non-breaking changes
self.config.rust_debug_assertions.to_string()
},
);
+ cargo.env(
+ profile_var("OVERFLOW_CHECKS"),
+ if mode == Mode::Std {
+ self.config.rust_overflow_checks_std.to_string()
+ } else {
+ self.config.rust_overflow_checks.to_string()
+ },
+ );
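
Illustratively, what the new toggle controls in compiled code:

fn bump(x: u8) -> u8 {
    x + 1 // panics on u8::MAX iff built with overflow checks; wraps to 0 otherwise
}

fn main() {
    assert_eq!(bump(1), 2);
}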
// `dsymutil` adds time to builds on Apple platforms for no clear benefit, and also makes
// it more difficult for debuggers to find debug info. The compiler currently defaults to
// efficient initial-exec TLS model. This doesn't work with `dlopen`,
// so we can't use it by default in general, but we can use it for tools
// and our own internal libraries.
- if !mode.must_support_dlopen() {
+ if !mode.must_support_dlopen() && !target.triple.starts_with("powerpc-") {
rustflags.arg("-Ztls-model=initial-exec");
}
fail_fast: true,
doc_tests: DocTests::No,
bless: false,
+ force_rerun: false,
compare_mode: None,
rustfix_coverage: false,
pass: None,
fail_fast: true,
doc_tests: DocTests::No,
bless: false,
+ force_rerun: false,
compare_mode: None,
rustfix_coverage: false,
pass: None,
fail_fast: true,
doc_tests: DocTests::Yes,
bless: false,
+ force_rerun: false,
compare_mode: None,
rustfix_coverage: false,
pass: None,
pub rust_codegen_units_std: Option<u32>,
pub rust_debug_assertions: bool,
pub rust_debug_assertions_std: bool,
+ pub rust_overflow_checks: bool,
+ pub rust_overflow_checks_std: bool,
pub rust_debug_logging: bool,
pub rust_debuginfo_level_rustc: u32,
pub rust_debuginfo_level_std: u32,
codegen_units_std: Option<u32>,
debug_assertions: Option<bool>,
debug_assertions_std: Option<bool>,
+ overflow_checks: Option<bool>,
+ overflow_checks_std: Option<bool>,
debug_logging: Option<bool>,
debuginfo_level: Option<u32>,
debuginfo_level_rustc: Option<u32>,
let mut debug = None;
let mut debug_assertions = None;
let mut debug_assertions_std = None;
+ let mut overflow_checks = None;
+ let mut overflow_checks_std = None;
let mut debug_logging = None;
let mut debuginfo_level = None;
let mut debuginfo_level_rustc = None;
debug = rust.debug;
debug_assertions = rust.debug_assertions;
debug_assertions_std = rust.debug_assertions_std;
+ overflow_checks = rust.overflow_checks;
+ overflow_checks_std = rust.overflow_checks_std;
debug_logging = rust.debug_logging;
debuginfo_level = rust.debuginfo_level;
debuginfo_level_rustc = rust.debuginfo_level_rustc;
config.rust_debug_assertions = debug_assertions.unwrap_or(default);
config.rust_debug_assertions_std =
debug_assertions_std.unwrap_or(config.rust_debug_assertions);
+ config.rust_overflow_checks = overflow_checks.unwrap_or(default);
+ config.rust_overflow_checks_std = overflow_checks_std.unwrap_or(default);
config.rust_debug_logging = debug_logging.unwrap_or(config.rust_debug_assertions);
o("llvm-assertions", "llvm.assertions", "build LLVM with assertions")
o("llvm-plugins", "llvm.plugins", "build LLVM with plugin interface")
o("debug-assertions", "rust.debug-assertions", "build with debugging assertions")
+o("overflow-checks", "rust.overflow-checks", "build with overflow checks")
o("llvm-release-debuginfo", "llvm.release-debuginfo", "build LLVM with debugger metadata")
v("debuginfo-level", "rust.debuginfo-level", "debuginfo level for Rust code")
v("debuginfo-level-rustc", "rust.debuginfo-level-rustc", "debuginfo level for the compiler")
paths: Vec<PathBuf>,
/// Whether to automatically update stderr/stdout files
bless: bool,
+ force_rerun: bool,
compare_mode: Option<String>,
pass: Option<String>,
run: Option<String>,
opts.optflag("", "no-doc", "do not run doc tests");
opts.optflag("", "doc", "only run doc tests");
opts.optflag("", "bless", "update all stderr/stdout files of failing ui tests");
+ opts.optflag("", "force-rerun", "rerun tests even if the inputs are unchanged");
opts.optopt(
"",
"compare-mode",
"test" | "t" => Subcommand::Test {
paths,
bless: matches.opt_present("bless"),
+ force_rerun: matches.opt_present("force-rerun"),
compare_mode: matches.opt_str("compare-mode"),
pass: matches.opt_str("pass"),
run: matches.opt_str("run"),
}
}
+ pub fn force_rerun(&self) -> bool {
+ match *self {
+ Subcommand::Test { force_rerun, .. } => force_rerun,
+ _ => false,
+ }
+ }
+
pub fn rustfix_coverage(&self) -> bool {
match *self {
Subcommand::Test { rustfix_coverage, .. } => rustfix_coverage,
cmd.arg("--bless");
}
+ if builder.config.cmd.force_rerun() {
+ cmd.arg("--force-rerun");
+ }
+
let compare_mode =
builder.config.cmd.compare_mode().or_else(|| {
if builder.config.test_compare_mode { self.compare_mode } else { None }
Struct(VariantStruct),
}
-/// Small wrapper around [`rustc_span::Span]` that adds helper methods
+/// Small wrapper around [`rustc_span::Span`] that adds helper methods
/// and enforces calling [`rustc_span::Span::source_callsite()`].
#[derive(Copy, Clone, Debug)]
crate struct Span(rustc_span::Span);
--- /dev/null
+// This test ensures that `mem::replace::<T>` only ever calls `@llvm.memcpy`
+// with `size_of::<T>()` as the size, and never goes through any wrapper that
+// may e.g. multiply `size_of::<T>()` by a variable "count" (which is only
+// known to be `1` after inlining).
+
+// compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+
+pub fn replace_byte(dst: &mut u8, src: u8) -> u8 {
+ std::mem::replace(dst, src)
+}
+
+// NOTE(eddyb) the `CHECK-NOT`s ensure that the only calls of `@llvm.memcpy` in
+// the entire output are the two direct calls we want, from `ptr::{read,write}`.
+
+// CHECK-NOT: call void @llvm.memcpy
+// CHECK: ; core::ptr::read
+// CHECK-NOT: call void @llvm.memcpy
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{.*}}(i8* align 1 %{{.*}}, i8* align 1 %src, i{{.*}} 1, i1 false)
+// CHECK-NOT: call void @llvm.memcpy
+// CHECK: ; core::ptr::write
+// CHECK-NOT: call void @llvm.memcpy
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{.*}}(i8* align 1 %dst, i8* align 1 %src, i{{.*}} 1, i1 false)
+// CHECK-NOT: call void @llvm.memcpy
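
For reference, the semantics the test pins down at the LLVM level, in plain Rust:

fn main() {
    let mut x = 1u8;
    let old = std::mem::replace(&mut x, 2);
    assert_eq!((old, x), (1, 2));
}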
+++ /dev/null
-// compile-flags: -O
-// ignore-debug: the debug assertions get in the way
-#![crate_type = "lib"]
-#![feature(shrink_to)]
-
-// Make sure that `Vec::shrink_to_fit` never emits panics via `RawVec::shrink_to_fit`,
-// "Tried to shrink to a larger capacity", because the length is *always* <= capacity.
-
-// CHECK-LABEL: @shrink_to_fit
-#[no_mangle]
-pub fn shrink_to_fit(vec: &mut Vec<u32>) {
- // CHECK-NOT: panic
- vec.shrink_to_fit();
-}
-
-// CHECK-LABEL: @issue71861
-#[no_mangle]
-pub fn issue71861(vec: Vec<u32>) -> Box<[u32]> {
- // CHECK-NOT: panic
- vec.into_boxed_slice()
-}
-
-// CHECK-LABEL: @issue75636
-#[no_mangle]
-pub fn issue75636<'a>(iter: &[&'a str]) -> Box<[&'a str]> {
- // CHECK-NOT: panic
- iter.iter().copied().collect()
-}
--- /dev/null
+// compile-flags: -O
+// ignore-debug: the debug assertions get in the way
+#![crate_type = "lib"]
+#![feature(shrink_to)]
+
+// Make sure that `Vec::shrink_to_fit` never emits panics via `RawVec::shrink_to_fit`,
+// "Tried to shrink to a larger capacity", because the length is *always* <= capacity.
+
+// CHECK-LABEL: @shrink_to_fit
+#[no_mangle]
+pub fn shrink_to_fit(vec: &mut Vec<u32>) {
+ // CHECK-NOT: panic
+ vec.shrink_to_fit();
+}
+
+// CHECK-LABEL: @issue71861
+#[no_mangle]
+pub fn issue71861(vec: Vec<u32>) -> Box<[u32]> {
+ // CHECK-NOT: panic
+ vec.into_boxed_slice()
+}
+
+// CHECK-LABEL: @issue75636
+#[no_mangle]
+pub fn issue75636<'a>(iter: &[&'a str]) -> Box<[&'a str]> {
+ // CHECK-NOT: panic
+ iter.iter().copied().collect()
+}
#[naked]
pub unsafe fn default_abi() {
//~^ WARN Rust ABI is unsupported in naked functions
- //~| WARN this was previously accepted
asm!("", options(noreturn));
}
#[naked]
pub unsafe extern "Rust" fn rust_abi() {
//~^ WARN Rust ABI is unsupported in naked functions
- //~| WARN this was previously accepted
asm!("", options(noreturn));
}
LL | pub unsafe fn default_abi() {
| ^^^^^^^^^^^
|
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
- = note: for more information, see issue #32408 <https://github.com/rust-lang/rust/issues/32408>
+ = note: `#[warn(undefined_naked_function_abi)]` on by default
warning: Rust ABI is unsupported in naked functions
- --> $DIR/naked-functions.rs:142:29
+ --> $DIR/naked-functions.rs:141:29
|
LL | pub unsafe extern "Rust" fn rust_abi() {
| ^^^^^^^^
- |
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
- = note: for more information, see issue #32408 <https://github.com/rust-lang/rust/issues/32408>
warning: naked functions cannot be inlined
- --> $DIR/naked-functions.rs:177:1
+ --> $DIR/naked-functions.rs:175:1
|
LL | #[inline]
| ^^^^^^^^^
= note: for more information, see issue #32408 <https://github.com/rust-lang/rust/issues/32408>
warning: naked functions cannot be inlined
- --> $DIR/naked-functions.rs:185:1
+ --> $DIR/naked-functions.rs:183:1
|
LL | #[inline(always)]
| ^^^^^^^^^^^^^^^^^
= note: for more information, see issue #32408 <https://github.com/rust-lang/rust/issues/32408>
warning: naked functions cannot be inlined
- --> $DIR/naked-functions.rs:193:1
+ --> $DIR/naked-functions.rs:191:1
|
LL | #[inline(never)]
| ^^^^^^^^^^^^^^^^
= note: for more information, see issue #32408 <https://github.com/rust-lang/rust/issues/32408>
warning: naked functions cannot be inlined
- --> $DIR/naked-functions.rs:201:1
+ --> $DIR/naked-functions.rs:199:1
|
LL | #[inline]
| ^^^^^^^^^
= note: for more information, see issue #32408 <https://github.com/rust-lang/rust/issues/32408>
warning: naked functions cannot be inlined
- --> $DIR/naked-functions.rs:204:1
+ --> $DIR/naked-functions.rs:202:1
|
LL | #[inline(always)]
| ^^^^^^^^^^^^^^^^^
= note: for more information, see issue #32408 <https://github.com/rust-lang/rust/issues/32408>
warning: naked functions cannot be inlined
- --> $DIR/naked-functions.rs:207:1
+ --> $DIR/naked-functions.rs:205:1
|
LL | #[inline(never)]
| ^^^^^^^^^^^^^^^^
--- /dev/null
+// run-rustfix
+
+trait Trait {
+ const FOO: usize;
+
+ type Target;
+}
+
+struct S;
+
+impl Trait for S {
+ const FOO: usize = 0;
+ type Target = ();
+}
+
+fn main() {
+ let _: <S as Trait>::Target; //~ ERROR cannot find associated type `Output` in trait `Trait`
+ //~^ HELP maybe you meant this associated type
+
+ let _ = <S as Trait>::FOO; //~ ERROR cannot find method or associated constant `BAR` in trait `Trait`
+ //~^ HELP maybe you meant this associated constant
+}
--- /dev/null
+// run-rustfix
+
+trait Trait {
+ const FOO: usize;
+
+ type Target;
+}
+
+struct S;
+
+impl Trait for S {
+ const FOO: usize = 0;
+ type Target = ();
+}
+
+fn main() {
+ let _: <S as Trait>::Output; //~ ERROR cannot find associated type `Output` in trait `Trait`
+ //~^ HELP maybe you meant this associated type
+
+ let _ = <S as Trait>::BAR; //~ ERROR cannot find method or associated constant `BAR` in trait `Trait`
+ //~^ HELP maybe you meant this associated constant
+}
--- /dev/null
+error[E0576]: cannot find associated type `Output` in trait `Trait`
+ --> $DIR/issue-87638.rs:17:26
+ |
+LL | type Target;
+ | ------------ associated type `Target` defined here
+...
+LL | let _: <S as Trait>::Output;
+ | ^^^^^^
+ | |
+ | not found in `Trait`
+ | help: maybe you meant this associated type: `Target`
+
+error[E0576]: cannot find method or associated constant `BAR` in trait `Trait`
+ --> $DIR/issue-87638.rs:20:27
+ |
+LL | const FOO: usize;
+ | ----------------- associated constant `FOO` defined here
+...
+LL | let _ = <S as Trait>::BAR;
+ | ^^^
+ | |
+ | not found in `Trait`
+ | help: maybe you meant this associated constant: `FOO`
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0576`.
error[E0576]: cannot find associated type `X` in trait `A`
--> $DIR/issue-22037.rs:3:33
|
+LL | type Output;
+ | ------------ associated type `Output` defined here
LL | fn a(&self) -> <Self as A>::X;
- | ^ not found in `A`
+ | ^
+ | |
+ | not found in `A`
+ | help: maybe you meant this associated type: `Output`
error: aborting due to previous error
--> $DIR/no-async-const.rs:4:11
|
LL | pub async const fn x() {}
- | ^^^^^ expected one of `extern`, `fn`, or `unsafe`
+ | ------^^^^^
+ | | |
+ | | expected one of `extern`, `fn`, or `unsafe`
+ | help: `const` must come before `async`: `const async`
+ |
+ = note: keyword order for functions declaration is `default`, `pub`, `const`, `async`, `unsafe`, `extern`
error: aborting due to previous error
| - while parsing this item list starting here
LL | #[cfg(FALSE)]
LL | unsafe async fn g() {}
- | ^^^^^ expected one of `extern` or `fn`
+ | -------^^^^^
+ | | |
+ | | expected one of `extern` or `fn`
+ | help: `async` must come before `unsafe`: `async unsafe`
LL | }
| - the item list ends here
+ |
+ = note: keyword order for functions declaration is `default`, `pub`, `const`, `async`, `unsafe`, `extern`
error: expected one of `extern` or `fn`, found keyword `async`
--> $DIR/no-unsafe-async.rs:11:8
|
LL | unsafe async fn f() {}
- | ^^^^^ expected one of `extern` or `fn`
+ | -------^^^^^
+ | | |
+ | | expected one of `extern` or `fn`
+ | help: `async` must come before `unsafe`: `async unsafe`
+ |
+ = note: keyword order for functions declaration is `default`, `pub`, `const`, `async`, `unsafe`, `extern`
error: aborting due to 2 previous errors
--- /dev/null
+#![feature(const_generics, const_evaluatable_checked)]
+#![allow(incomplete_features)]
+
+// library portion of regression test for #87674
+pub struct Foo<const N: usize>([(); N + 1])
+where
+ [(); N + 1]: ;
+
+// library portion of regression test for #87603
+pub struct S<T: Copy + Default, const N: usize>
+where
+ [T; N * 2]: Sized,
+{
+ pub s: [T; N * 2],
+}
+impl<T: Default + Copy, const N: usize> S<T, N>
+where
+ [T; N * 2]: Sized,
+{
+ pub fn test() -> Self {
+ S { s: [T::default(); N * 2] }
+ }
+}
--- /dev/null
+#![feature(const_generics, const_evaluatable_checked)]
+#![allow(incomplete_features)]
+
+// library portion of testing that `impl Trait<{ expr }>` doesn't
+// ICE because of a `DefKind::TyParam` parent
+pub fn foo<const N: usize>(foo: impl Into<[(); N + 1]>) {
+ foo.into();
+}
--- /dev/null
+// aux-build:generics_of_parent.rs
+// check-pass
+#![feature(const_generics, const_evaluatable_checked)]
+#![allow(incomplete_features)]
+
+extern crate generics_of_parent;
+
+use generics_of_parent::{Foo, S};
+
+fn main() {
+ // regression test for #87603
+ const N: usize = 2;
+ let x: S<u8, N> = S::test();
+}
+
+// regression test for #87674
+fn new<U>(a: U) -> U {
+ a
+}
+fn foo<const N: usize>(bar: &mut Foo<N>)
+where
+ [(); N + 1]: ,
+{
+ *bar = new(loop {});
+}
--- /dev/null
+// aux-build:generics_of_parent_impl_trait.rs
+#![feature(const_generics, const_evaluatable_checked)]
+#![allow(incomplete_features)]
+
+extern crate generics_of_parent_impl_trait;
+
+fn main() {
+ // check for `impl Trait<{ const }>` which has a parent of a `DefKind::TyParam`
+ generics_of_parent_impl_trait::foo([()]);
+ //~^ error: type annotations needed:
+}
--- /dev/null
+error[E0284]: type annotations needed: cannot satisfy `the constant `foo::{opaque#0}::{constant#0}` can be evaluated`
+ --> $DIR/parent_generics_of_encoding_impl_trait.rs:9:5
+ |
+LL | generics_of_parent_impl_trait::foo([()]);
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cannot satisfy `the constant `foo::{opaque#0}::{constant#0}` can be evaluated`
+ |
+ ::: $DIR/auxiliary/generics_of_parent_impl_trait.rs:6:48
+ |
+LL | pub fn foo<const N: usize>(foo: impl Into<[(); N + 1]>) {
+ | ----- required by this bound in `foo`
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0284`.
error[E0080]: evaluation of constant value failed
- --> $SRC_DIR/core/src/intrinsics.rs:LL:COL
- |
-LL | unsafe { copy_nonoverlapping(src, dst, count) }
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- | |
- | memory access failed: alloc7 has size 4, so pointer to 4 bytes starting at offset 4 is out-of-bounds
- | inside `copy_nonoverlapping::<u32>` at $SRC_DIR/core/src/intrinsics.rs:LL:COL
- |
- ::: $SRC_DIR/core/src/ptr/mod.rs:LL:COL
+ --> $SRC_DIR/core/src/ptr/mod.rs:LL:COL
|
LL | copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
- | --------------------------------------------- inside `std::ptr::read::<u32>` at $SRC_DIR/core/src/ptr/mod.rs:LL:COL
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | |
+ | memory access failed: alloc7 has size 4, so pointer to 4 bytes starting at offset 4 is out-of-bounds
+ | inside `std::ptr::read::<u32>` at $SRC_DIR/core/src/ptr/mod.rs:LL:COL
|
::: $DIR/out_of_bounds_read.rs:13:33
|
| ----------------------- inside `_READ` at $DIR/out_of_bounds_read.rs:13:33
error[E0080]: evaluation of constant value failed
- --> $SRC_DIR/core/src/intrinsics.rs:LL:COL
- |
-LL | unsafe { copy_nonoverlapping(src, dst, count) }
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- | |
- | memory access failed: alloc7 has size 4, so pointer to 4 bytes starting at offset 4 is out-of-bounds
- | inside `copy_nonoverlapping::<u32>` at $SRC_DIR/core/src/intrinsics.rs:LL:COL
- |
- ::: $SRC_DIR/core/src/ptr/mod.rs:LL:COL
+ --> $SRC_DIR/core/src/ptr/mod.rs:LL:COL
|
LL | copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
- | --------------------------------------------- inside `std::ptr::read::<u32>` at $SRC_DIR/core/src/ptr/mod.rs:LL:COL
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | |
+ | memory access failed: alloc7 has size 4, so pointer to 4 bytes starting at offset 4 is out-of-bounds
+ | inside `std::ptr::read::<u32>` at $SRC_DIR/core/src/ptr/mod.rs:LL:COL
|
::: $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
|
| ------------------- inside `_CONST_READ` at $DIR/out_of_bounds_read.rs:14:39
error[E0080]: evaluation of constant value failed
- --> $SRC_DIR/core/src/intrinsics.rs:LL:COL
- |
-LL | unsafe { copy_nonoverlapping(src, dst, count) }
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- | |
- | memory access failed: alloc7 has size 4, so pointer to 4 bytes starting at offset 4 is out-of-bounds
- | inside `copy_nonoverlapping::<u32>` at $SRC_DIR/core/src/intrinsics.rs:LL:COL
- |
- ::: $SRC_DIR/core/src/ptr/mod.rs:LL:COL
+ --> $SRC_DIR/core/src/ptr/mod.rs:LL:COL
|
LL | copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
- | --------------------------------------------- inside `std::ptr::read::<u32>` at $SRC_DIR/core/src/ptr/mod.rs:LL:COL
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | |
+ | memory access failed: alloc7 has size 4, so pointer to 4 bytes starting at offset 4 is out-of-bounds
+ | inside `std::ptr::read::<u32>` at $SRC_DIR/core/src/ptr/mod.rs:LL:COL
|
::: $SRC_DIR/core/src/ptr/mut_ptr.rs:LL:COL
|
--- /dev/null
+// Test that native_link_modifiers_bundle doesn't need static-nobundle
+// check-pass
+
+#![feature(native_link_modifiers)]
+#![feature(native_link_modifiers_bundle)]
+
+#[link(name = "foo", kind = "static", modifiers = "-bundle")]
+extern "C" {}
+
+fn main() {}
--- /dev/null
+// compile-flags: -l static:-bundle=nonexistent
+
+fn main() {}
--- /dev/null
+error: linking modifiers are currently unstable, the `-Z unstable-options` flag must also be passed to use it
+
-//~ ERROR kind="static-nobundle" is unstable
-// Test the behavior of rustc when non-existent library is statically linked
-
+// check-pass
// compile-flags: -l static-nobundle=nonexistent
fn main() {}
warning: library kind `static-nobundle` has been superseded by specifying `-bundle` on library kind `static`. Try `static:-bundle`
-error[E0658]: kind="static-nobundle" is unstable
- |
- = note: see issue #37403 <https://github.com/rust-lang/rust/issues/37403> for more information
- = help: add `#![feature(static_nobundle)]` to the crate attributes to enable
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0658`.
| ^^^^^^^^^^^^^^^^^^^^^^^^
error[E0658]: kind="static-nobundle" is unstable
- --> $DIR/feature-gate-static-nobundle.rs:1:1
+ --> $DIR/feature-gate-static-nobundle.rs:1:22
|
LL | #[link(name = "foo", kind = "static-nobundle")]
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | ^^^^^^^^^^^^^^^^^^^^^^^^
|
= note: see issue #37403 <https://github.com/rust-lang/rust/issues/37403> for more information
= help: add `#![feature(static_nobundle)]` to the crate attributes to enable
error[E0576]: cannot find associated type `Dst` in trait `From`
--> $DIR/issue-19883.rs:9:30
|
+LL | type Output;
+ | ------------ associated type `Output` defined here
+...
LL | <Dst as From<Self>>::Dst
- | ^^^ not found in `From`
+ | ^^^
+ | |
+ | not found in `From`
+ | help: maybe you meant this associated type: `Output`
error: aborting due to previous error
--- /dev/null
+// edition:2018
+
+// Test that even when `const` is already present, the proposed fix is `const const async`,
+// like for `pub pub`.
+
+const async const fn test() {}
+//~^ ERROR expected one of `extern`, `fn`, or `unsafe`, found keyword `const`
+//~| NOTE expected one of `extern`, `fn`, or `unsafe`
+//~| HELP `const` must come before `async`
+//~| SUGGESTION const async
+//~| NOTE keyword order for functions declaration is `default`, `pub`, `const`, `async`, `unsafe`, `extern`
--- /dev/null
+error: expected one of `extern`, `fn`, or `unsafe`, found keyword `const`
+ --> $DIR/const-async-const.rs:6:13
+ |
+LL | const async const fn test() {}
+ | ------^^^^^
+ | | |
+ | | expected one of `extern`, `fn`, or `unsafe`
+ | help: `const` must come before `async`: `const async`
+ |
+ = note: keyword order for functions declaration is `default`, `pub`, `const`, `async`, `unsafe`, `extern`
+
+error: aborting due to previous error
+
--- /dev/null
+// edition:2018
+
+// There is an order to respect for keywords before a function:
+// `<visibility>, const, async, unsafe, extern, "<ABI>"`
+//
+// This test ensures the compiler is helpful about them being misplaced.
+// Visibilities are tested elsewhere.
+
+async unsafe const fn test() {}
+//~^ ERROR expected one of `extern` or `fn`, found keyword `const`
+//~| NOTE expected one of `extern` or `fn`
+//~| HELP `const` must come before `async unsafe`
+//~| SUGGESTION const async unsafe
+//~| NOTE keyword order for functions declaration is `default`, `pub`, `const`, `async`, `unsafe`, `extern`
--- /dev/null
+error: expected one of `extern` or `fn`, found keyword `const`
+ --> $DIR/several-kw-jump.rs:9:14
+ |
+LL | async unsafe const fn test() {}
+ | -------------^^^^^
+ | | |
+ | | expected one of `extern` or `fn`
+ | help: `const` must come before `async unsafe`: `const async unsafe`
+ |
+ = note: keyword order for functions declaration is `default`, `pub`, `const`, `async`, `unsafe`, `extern`
+
+error: aborting due to previous error
+
--- /dev/null
+// edition:2018
+
+// There is an order to respect for keywords before a function:
+// `<visibility>, const, async, unsafe, extern, "<ABI>"`
+//
+// This test ensures the compiler is helpful about them being misplaced.
+// Visibilities are tested elsewhere.
+
+unsafe async fn test() {}
+//~^ ERROR expected one of `extern` or `fn`, found keyword `async`
+//~| NOTE expected one of `extern` or `fn`
+//~| HELP `async` must come before `unsafe`
+//~| SUGGESTION async unsafe
+//~| NOTE keyword order for functions declaration is `default`, `pub`, `const`, `async`, `unsafe`, `extern`
--- /dev/null
+error: expected one of `extern` or `fn`, found keyword `async`
+ --> $DIR/wrong-async.rs:9:8
+ |
+LL | unsafe async fn test() {}
+ | -------^^^^^
+ | | |
+ | | expected one of `extern` or `fn`
+ | help: `async` must come before `unsafe`: `async unsafe`
+ |
+ = note: keyword order for functions declaration is `default`, `pub`, `const`, `async`, `unsafe`, `extern`
+
+error: aborting due to previous error
+
--- /dev/null
+// edition:2018
+
+// There is an order to respect for keywords before a function:
+// `<visibility>, const, async, unsafe, extern, "<ABI>"`
+//
+// This test ensures the compiler is helpful about them being misplaced.
+// Visibilities are tested elsewhere.
+
+unsafe const fn test() {}
+//~^ ERROR expected one of `extern` or `fn`, found keyword `const`
+//~| NOTE expected one of `extern` or `fn`
+//~| HELP `const` must come before `unsafe`
+//~| SUGGESTION const unsafe
+//~| NOTE keyword order for functions declaration is `default`, `pub`, `const`, `async`, `unsafe`, `extern`
--- /dev/null
+error: expected one of `extern` or `fn`, found keyword `const`
+ --> $DIR/wrong-const.rs:9:8
+ |
+LL | unsafe const fn test() {}
+ | -------^^^^^
+ | | |
+ | | expected one of `extern` or `fn`
+ | help: `const` must come before `unsafe`: `const unsafe`
+ |
+ = note: keyword order for functions declaration is `default`, `pub`, `const`, `async`, `unsafe`, `extern`
+
+error: aborting due to previous error
+
--- /dev/null
+// edition:2018
+
+// There is an order to respect for keywords before a function:
+// `<visibility>, const, async, unsafe, extern, "<ABI>"`
+//
+// This test ensures the compiler is helpful about them being misplaced.
+// Visibilities are tested elsewhere.
+
+extern unsafe fn test() {}
+//~^ ERROR expected `fn`, found keyword `unsafe`
+//~| NOTE expected `fn`
+//~| HELP `unsafe` must come before `extern`
+//~| SUGGESTION unsafe extern
+//~| NOTE keyword order for functions declaration is `default`, `pub`, `const`, `async`, `unsafe`, `extern`
--- /dev/null
+error: expected `fn`, found keyword `unsafe`
+ --> $DIR/wrong-unsafe.rs:9:8
+ |
+LL | extern unsafe fn test() {}
+ | -------^^^^^^
+ | | |
+ | | expected `fn`
+ | help: `unsafe` must come before `extern`: `unsafe extern`
+ |
+ = note: keyword order for functions declaration is `default`, `pub`, `const`, `async`, `unsafe`, `extern`
+
+error: aborting due to previous error
+
-error: union expressions should have exactly one field
+error[E0784]: union expressions should have exactly one field
--> $DIR/union-fields-2.rs:10:13
|
LL | let u = U {};
| ^
-error: union expressions should have exactly one field
+error[E0784]: union expressions should have exactly one field
--> $DIR/union-fields-2.rs:12:13
|
LL | let u = U { a: 0, b: 1 };
|
= note: available fields are: `a`, `b`
-error: union expressions should have exactly one field
+error[E0784]: union expressions should have exactly one field
--> $DIR/union-fields-2.rs:13:13
|
LL | let u = U { a: 0, b: 1, c: 2 };
| ^
-error: union expressions should have exactly one field
+error[E0784]: union expressions should have exactly one field
--> $DIR/union-fields-2.rs:15:13
|
LL | let u = U { ..u };
error: aborting due to 13 previous errors
-Some errors have detailed explanations: E0026, E0436, E0560.
+Some errors have detailed explanations: E0026, E0436, E0560, E0784.
For more information about an error, try `rustc --explain E0026`.
-error: union expressions should have exactly one field
+error[E0784]: union expressions should have exactly one field
--> $DIR/union-fields-2.rs:10:13
|
LL | let u = U {};
| ^
-error: union expressions should have exactly one field
+error[E0784]: union expressions should have exactly one field
--> $DIR/union-fields-2.rs:12:13
|
LL | let u = U { a: 0, b: 1 };
|
= note: available fields are: `a`, `b`
-error: union expressions should have exactly one field
+error[E0784]: union expressions should have exactly one field
--> $DIR/union-fields-2.rs:13:13
|
LL | let u = U { a: 0, b: 1, c: 2 };
| ^
-error: union expressions should have exactly one field
+error[E0784]: union expressions should have exactly one field
--> $DIR/union-fields-2.rs:15:13
|
LL | let u = U { ..u };
error: aborting due to 13 previous errors
-Some errors have detailed explanations: E0026, E0436, E0560.
+Some errors have detailed explanations: E0026, E0436, E0560, E0784.
For more information about an error, try `rustc --explain E0026`.
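
For reference, what the newly assigned E0784 enforces: a union expression must initialize exactly one field.

union U {
    a: u8,
    b: u16,
}

fn main() {
    let u = U { a: 0 }; // ok: exactly one field
    // `U {}` and `U { a: 0, b: 1 }` are both rejected with E0784.
    let _ = unsafe { u.a };
}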
pub nodejs: Option<String>,
/// Path to a npm executable. Used for rustdoc GUI tests
pub npm: Option<String>,
+
+ /// Whether to rerun tests even if the inputs are unchanged.
+ pub force_rerun: bool,
}
impl Config {
"enable this to generate a Rustfix coverage file, which is saved in \
`./<build_base>/rustfix_missing_coverage.txt`",
)
+ .optflag("", "force-rerun", "rerun tests even if the inputs are unchanged")
.optflag("h", "help", "show this message")
.reqopt("", "channel", "current Rust channel", "CHANNEL");
llvm_components: matches.opt_str("llvm-components").unwrap(),
nodejs: matches.opt_str("nodejs"),
npm: matches.opt_str("npm"),
+
+ force_rerun: matches.opt_present("force-rerun"),
}
}
let test_name = crate::make_test_name(config, testpaths, revision);
let mut desc = make_test_description(config, test_name, &test_path, src_file, cfg);
// Ignore tests that already run and are up to date with respect to inputs.
- desc.ignore |= is_up_to_date(
- config,
- testpaths,
- &early_props,
- revision.map(|s| s.as_str()),
- inputs,
- );
+ if !config.force_rerun {
+ desc.ignore |= is_up_to_date(
+ config,
+ testpaths,
+ &early_props,
+ revision.map(|s| s.as_str()),
+ inputs,
+ );
+ }
test::TestDescAndFn { desc, testfn: make_test_closure(config, testpaths, revision) }
})
.collect()
/// An HTML file.
///
/// This includes the contents of the HTML file, and an optional set of
- /// HTML IDs. The IDs are used for checking fragments. The are computed
+ /// HTML IDs. The IDs are used for checking fragments. They are computed
/// as-needed. The source is discarded (replaced with an empty string)
/// after the file has been checked, to conserve on memory.
HtmlFile { source: Rc<String>, ids: RefCell<HashSet<String>> },
-Subproject commit 99ec9c1707aad74b4a4a6d301f27fb1c19733f58
+Subproject commit 042cbf175bfdad6524fd00d7570b2297a0426063