[submodule "src/compiler-rt"]
path = src/compiler-rt
url = https://github.com/rust-lang/compiler-rt.git
+[submodule "src/rt/hoedown"]
+ path = src/rt/hoedown
+ url = https://github.com/rust-lang/hoedown.git
+ branch = rust-2015-09-21-do-not-delete
[submodule "src/jemalloc"]
path = src/jemalloc
url = https://github.com/rust-lang/jemalloc.git
[submodule "book"]
path = src/doc/book
url = https://github.com/rust-lang/book.git
+[submodule "rls"]
+ path = rls
+ url = https://github.com/rust-lang-nursery/rls.git
+
- sh src/ci/run.sh
on_failure:
- - cat %CD%\sccache.log
- - cat C:\Users\appveyor\AppData\Local\Temp\1\build-cache-logs\*.log
+ - cat %CD%\sccache.log || exit 0
cache:
- C:\cache\rustsrc
-Subproject commit 4729175045b41b688ab903120860866ce7a22ba9
+Subproject commit c416fb60b11ecfd2a1ba0fb8567c9a92590b5d28
opt inject-std-version 1 "inject the current compiler version of libstd into programs"
opt llvm-version-check 1 "check if the LLVM version is supported, build anyway"
opt codegen-tests 1 "run the src/test/codegen tests"
-opt save-analysis 0 "save API analysis data"
opt option-checking 1 "complain about unrecognized options in this configure script"
opt ninja 0 "build LLVM using the Ninja generator (for MSVC, requires building in the correct environment)"
opt locked-deps 0 "force Cargo.lock to be up to date"
\fB\-\-crate\-name\fR \fINAME\fR
Specify the name of the crate being built.
.TP
-\fB\-\-emit\fR [asm|llvm\-bc|llvm\-ir|obj|link|dep\-info][=\fIPATH\fR]
+\fB\-\-emit\fR [asm|llvm\-bc|llvm\-ir|obj|link|dep\-info|mir][=\fIPATH\fR]
Configure the output that \fBrustc\fR will produce. Each emission may also have
an optional explicit output \fIPATH\fR specified for that particular emission
kind. This path takes precedence over the \fB-o\fR option.
--- /dev/null
+Subproject commit 016cbc514cf44a2bd3fe806e8afa6b9c50287373
cargo.env("CFG_DEFAULT_AR", s);
}
build.run(&mut cargo);
+ update_mtime(build, &librustc_stamp(build, compiler, target));
}
/// Same as `std_link`, only for librustc
build.cargo_out(compiler, Mode::Libtest, target).join(".libtest.stamp")
}
+/// Cargo's output path for librustc in a given stage, compiled by a particular
+/// compiler for the specified target.
+fn librustc_stamp(build: &Build, compiler: &Compiler, target: &str) -> PathBuf {
+ build.cargo_out(compiler, Mode::Librustc, target).join(".librustc.stamp")
+}
+
fn compiler_file(compiler: &Path, file: &str) -> PathBuf {
let out = output(Command::new(compiler)
.arg(format!("-print-file-name={}", file)));
}
}
+/// Clears out the Cargo output directory for tools if it's out of date.
+///
+/// If the stamp file for the given `mode` (libstd, libtest, or librustc) is
+/// newer than the tools output directory for `stage`/`target`, the directory
+/// is cleared so that tools are rebuilt against the fresh libraries.
+pub fn maybe_clean_tools(build: &Build, stage: u32, target: &str, mode: Mode) {
+ let compiler = Compiler::new(stage, &build.config.build);
+
+ let stamp = match mode {
+ Mode::Libstd => libstd_stamp(build, &compiler, target),
+ Mode::Libtest => libtest_stamp(build, &compiler, target),
+ Mode::Librustc => librustc_stamp(build, &compiler, target),
+ _ => panic!(),
+ };
+ let out_dir = build.cargo_out(&compiler, Mode::Tool, target);
+ build.clear_if_dirty(&out_dir, &stamp);
+}
+
/// Build a tool in `src/tools`
///
/// This will build the specified tool with the specified `host` compiler in
let compiler = Compiler::new(stage, &build.config.build);
- // FIXME: need to clear out previous tool and ideally deps, may require
- // isolating output directories or require a pseudo shim step to
- // clear out all the info.
- //
- // Maybe when libstd is compiled it should clear out the rustc of the
- // corresponding stage?
- // let out_dir = build.cargo_out(stage, &host, Mode::Librustc, target);
- // build.clear_if_dirty(&out_dir, &libstd_stamp(build, stage, &host, target));
-
let mut cargo = build.cargo(&compiler, Mode::Tool, target, "build");
let mut dir = build.src.join(tool);
if !dir.exists() {
pub rustc_default_ar: Option<String>,
pub rust_optimize_tests: bool,
pub rust_debuginfo_tests: bool,
- pub rust_save_analysis: bool,
pub rust_dist_src: bool,
pub build: String,
optimize_tests: Option<bool>,
debuginfo_tests: Option<bool>,
codegen_tests: Option<bool>,
- save_analysis: Option<bool>,
}
/// TOML representation of how each build target is configured.
set(&mut config.rust_optimize_tests, rust.optimize_tests);
set(&mut config.rust_debuginfo_tests, rust.debuginfo_tests);
set(&mut config.codegen_tests, rust.codegen_tests);
- set(&mut config.rust_save_analysis, rust.save_analysis);
set(&mut config.rust_rpath, rust.rpath);
set(&mut config.debug_jemalloc, rust.debug_jemalloc);
set(&mut config.use_jemalloc, rust.use_jemalloc);
("LOCAL_REBUILD", self.local_rebuild),
("NINJA", self.ninja),
("CODEGEN_TESTS", self.codegen_tests),
- ("SAVE_ANALYSIS", self.rust_save_analysis),
("LOCKED_DEPS", self.locked_deps),
("VENDOR", self.vendor),
("FULL_BOOTSTRAP", self.full_bootstrap),
# saying that the FileCheck executable is missing, you may want to disable this.
#codegen-tests = true
-# Flag indicating whether the API analysis data should be saved.
-#save-analysis = false
-
# =============================================================================
# Options for specific targets
#
fn pkgname(build: &Build, component: &str) -> String {
if component == "cargo" {
format!("{}-{}", component, build.cargo_package_vers())
+ } else if component == "rls" {
+ format!("{}-{}", component, build.package_vers(&build.release_num("rls")))
} else {
assert!(component.starts_with("rust"));
format!("{}-{}", component, build.rust_package_vers())
/// Creates a tarball of save-analysis metadata, if available.
pub fn analysis(build: &Build, compiler: &Compiler, target: &str) {
- if !build.config.rust_save_analysis {
- return
- }
-
+ assert!(build.config.extended);
println!("Dist analysis");
if compiler.host != build.config.build {
println!("\tskipping, not a build host");
- return
+ return;
}
// Package save-analysis from stage1 if not doing a full bootstrap, as the
"man",
"src",
"cargo",
+ "rls",
];
let filter_fn = move |path: &Path| {
let src = build.src.join("cargo");
let etc = src.join("src/etc");
- let release_num = build.cargo_release_num();
+ let release_num = build.release_num("cargo");
let name = pkgname(build, "cargo");
let version = build.cargo_info.version(build, &release_num);
build.run(&mut cmd);
}
+pub fn rls(build: &Build, stage: u32, target: &str) {
+ assert!(build.config.extended);
+ println!("Dist RLS stage{} ({})", stage, target);
+ let compiler = Compiler::new(stage, &build.config.build);
+
+ let src = build.src.join("rls");
+ let release_num = build.release_num("rls");
+ let name = pkgname(build, "rls");
+ let version = build.rls_info.version(build, &release_num);
+
+ let tmp = tmpdir(build);
+ let image = tmp.join("rls-image");
+ drop(fs::remove_dir_all(&image));
+ t!(fs::create_dir_all(&image));
+
+ // Prepare the image directory
+ let rls = build.cargo_out(&compiler, Mode::Tool, target)
+ .join(exe("rls", target));
+ install(&rls, &image.join("bin"), 0o755);
+ let doc = image.join("share/doc/rls");
+ install(&src.join("README.md"), &doc, 0o644);
+ install(&src.join("LICENSE-MIT"), &doc, 0o644);
+ install(&src.join("LICENSE-APACHE"), &doc, 0o644);
+
+ // Prepare the overlay
+ let overlay = tmp.join("rls-overlay");
+ drop(fs::remove_dir_all(&overlay));
+ t!(fs::create_dir_all(&overlay));
+ install(&src.join("README.md"), &overlay, 0o644);
+ install(&src.join("LICENSE-MIT"), &overlay, 0o644);
+ install(&src.join("LICENSE-APACHE"), &overlay, 0o644);
+ t!(t!(File::create(overlay.join("version"))).write_all(version.as_bytes()));
+
+ // Generate the installer tarball
+ let mut cmd = Command::new("sh");
+ cmd.arg(sanitize_sh(&build.src.join("src/rust-installer/gen-installer.sh")))
+ .arg("--product-name=Rust")
+ .arg("--rel-manifest-dir=rustlib")
+ .arg("--success-message=RLS-ready-to-serve.")
+ .arg(format!("--image-dir={}", sanitize_sh(&image)))
+ .arg(format!("--work-dir={}", sanitize_sh(&tmpdir(build))))
+ .arg(format!("--output-dir={}", sanitize_sh(&distdir(build))))
+ .arg(format!("--non-installed-overlay={}", sanitize_sh(&overlay)))
+ .arg(format!("--package-name={}-{}", name, target))
+ .arg("--component-name=rls")
+ .arg("--legacy-manifest-dirs=rustlib,cargo");
+ build.run(&mut cmd);
+}
+
/// Creates a combined installer for the specified target in the provided stage.
pub fn extended(build: &Build, stage: u32, target: &str) {
println!("Dist extended stage{} ({})", stage, target);
let cargo_installer = dist.join(format!("{}-{}.tar.gz",
pkgname(build, "cargo"),
target));
+ let rls_installer = dist.join(format!("{}-{}.tar.gz",
+ pkgname(build, "rls"),
+ target));
+ let analysis_installer = dist.join(format!("{}-{}.tar.gz",
+ pkgname(build, "rust-analysis"),
+ target));
let docs_installer = dist.join(format!("{}-{}.tar.gz",
pkgname(build, "rust-docs"),
target));
// upgrades rustc was upgraded before rust-std. To avoid rustc clobbering
// the std files during uninstall. To do this ensure that rustc comes
// before rust-std in the list below.
- let mut input_tarballs = format!("{},{},{},{}",
+ let mut input_tarballs = format!("{},{},{},{},{},{}",
sanitize_sh(&rustc_installer),
sanitize_sh(&cargo_installer),
+ sanitize_sh(&rls_installer),
+ sanitize_sh(&analysis_installer),
sanitize_sh(&docs_installer),
sanitize_sh(&std_installer));
if target.contains("pc-windows-gnu") {
cmd.arg(distdir(build));
cmd.arg(today.trim());
cmd.arg(build.rust_package_vers());
- cmd.arg(build.package_vers(&build.cargo_release_num()));
+ cmd.arg(build.package_vers(&build.release_num("cargo")));
+ cmd.arg(build.package_vers(&build.release_num("rls")));
cmd.arg(addr);
t!(fs::create_dir_all(distdir(build)));
&docdir, &libdir, &mandir, &empty_dir);
}
- if build.config.rust_save_analysis {
- install_sh(&build, "analysis", "rust-analysis", stage, host, &prefix,
- &docdir, &libdir, &mandir, &empty_dir);
- }
-
install_sh(&build, "rustc", "rustc", stage, host, &prefix,
&docdir, &libdir, &mandir, &empty_dir);
t!(fs::remove_dir_all(&empty_dir));
out: PathBuf,
rust_info: channel::GitInfo,
cargo_info: channel::GitInfo,
+ rls_info: channel::GitInfo,
local_rebuild: bool,
// Probed tools at runtime
};
let rust_info = channel::GitInfo::new(&src);
let cargo_info = channel::GitInfo::new(&src.join("cargo"));
+ let rls_info = channel::GitInfo::new(&src.join("rls"));
let src_is_git = src.join(".git").exists();
Build {
rust_info: rust_info,
cargo_info: cargo_info,
+ rls_info: rls_info,
local_rebuild: local_rebuild,
cc: HashMap::new(),
cxx: HashMap::new(),
.env(format!("CFLAGS_{}", target), self.cflags(target).join(" "));
}
- if self.config.rust_save_analysis && compiler.is_final_stage(self) {
+ if self.config.extended && compiler.is_final_stage(self) {
cargo.env("RUSTC_SAVE_ANALYSIS", "api".to_string());
}
/// Returns the value of `package_vers` above for Cargo
fn cargo_package_vers(&self) -> String {
- self.package_vers(&self.cargo_release_num())
+ self.package_vers(&self.release_num("cargo"))
}
/// Returns the `version` string associated with this compiler for Rust
self.rust_info.version(self, channel::CFG_RELEASE_NUM)
}
- /// Returns the `a.b.c` version that Cargo is at.
- fn cargo_release_num(&self) -> String {
+ /// Returns the `a.b.c` version that the given package is at.
+ fn release_num(&self, package: &str) -> String {
let mut toml = String::new();
- t!(t!(File::open(self.src.join("cargo/Cargo.toml"))).read_to_string(&mut toml));
+ let toml_file_name = self.src.join(&format!("{}/Cargo.toml", package));
+ t!(t!(File::open(toml_file_name)).read_to_string(&mut toml));
for line in toml.lines() {
let prefix = "version = \"";
let suffix = "\"";
}
}
- panic!("failed to find version in cargo's Cargo.toml")
+ panic!("failed to find version in {}'s Cargo.toml", package)
}
/// Returns whether unstable features should be enabled for the compiler
}
if env::var_os("SCCACHE_ERROR_LOG").is_some() {
- cfg.env("RUST_LOG", "sccache=debug");
+ cfg.env("RUST_LOG", "sccache=info");
}
// FIXME: we don't actually need to build all LLVM tools and all LLVM
//! along with the actual implementation elsewhere. You can find more comments
//! about how to define rules themselves below.
-use std::collections::{BTreeMap, HashSet};
+use std::collections::{BTreeMap, HashSet, HashMap};
use std::mem;
use check::{self, TestKind};
//
// Tools used during the build system but not shipped
rules.build("tool-rustbook", "src/tools/rustbook")
- .dep(|s| s.name("librustc"))
+ .dep(|s| s.name("maybe-clean-tools"))
+ .dep(|s| s.name("librustc-tool"))
.run(move |s| compile::tool(build, s.stage, s.target, "rustbook"));
rules.build("tool-error-index", "src/tools/error_index_generator")
- .dep(|s| s.name("librustc"))
+ .dep(|s| s.name("maybe-clean-tools"))
+ .dep(|s| s.name("librustc-tool"))
.run(move |s| compile::tool(build, s.stage, s.target, "error_index_generator"));
rules.build("tool-tidy", "src/tools/tidy")
- .dep(|s| s.name("libstd"))
+ .dep(|s| s.name("maybe-clean-tools"))
+ .dep(|s| s.name("libstd-tool"))
.run(move |s| compile::tool(build, s.stage, s.target, "tidy"));
rules.build("tool-linkchecker", "src/tools/linkchecker")
- .dep(|s| s.name("libstd"))
+ .dep(|s| s.name("maybe-clean-tools"))
+ .dep(|s| s.name("libstd-tool"))
.run(move |s| compile::tool(build, s.stage, s.target, "linkchecker"));
rules.build("tool-cargotest", "src/tools/cargotest")
- .dep(|s| s.name("libstd"))
+ .dep(|s| s.name("maybe-clean-tools"))
+ .dep(|s| s.name("libstd-tool"))
.run(move |s| compile::tool(build, s.stage, s.target, "cargotest"));
rules.build("tool-compiletest", "src/tools/compiletest")
- .dep(|s| s.name("libtest"))
+ .dep(|s| s.name("maybe-clean-tools"))
+ .dep(|s| s.name("libtest-tool"))
.run(move |s| compile::tool(build, s.stage, s.target, "compiletest"));
rules.build("tool-build-manifest", "src/tools/build-manifest")
- .dep(|s| s.name("libstd"))
+ .dep(|s| s.name("maybe-clean-tools"))
+ .dep(|s| s.name("libstd-tool"))
.run(move |s| compile::tool(build, s.stage, s.target, "build-manifest"));
rules.build("tool-qemu-test-server", "src/tools/qemu-test-server")
- .dep(|s| s.name("libstd"))
+ .dep(|s| s.name("maybe-clean-tools"))
+ .dep(|s| s.name("libstd-tool"))
.run(move |s| compile::tool(build, s.stage, s.target, "qemu-test-server"));
rules.build("tool-qemu-test-client", "src/tools/qemu-test-client")
- .dep(|s| s.name("libstd"))
+ .dep(|s| s.name("maybe-clean-tools"))
+ .dep(|s| s.name("libstd-tool"))
.run(move |s| compile::tool(build, s.stage, s.target, "qemu-test-client"));
rules.build("tool-cargo", "cargo")
- .dep(|s| s.name("libstd"))
+ .dep(|s| s.name("maybe-clean-tools"))
+ .dep(|s| s.name("libstd-tool"))
.dep(|s| s.stage(0).host(s.target).name("openssl"))
.dep(move |s| {
// Cargo depends on procedural macros, which requires a full host
.host(&build.config.build)
})
.run(move |s| compile::tool(build, s.stage, s.target, "cargo"));
+ rules.build("tool-rls", "rls")
+ .host(true)
+ .dep(|s| s.name("librustc-tool"))
+ .dep(|s| s.stage(0).host(s.target).name("openssl"))
+ .dep(move |s| {
+ // rls, like cargo, uses procedural macros
+ s.name("librustc-link")
+ .target(&build.config.build)
+ .host(&build.config.build)
+ })
+ .run(move |s| compile::tool(build, s.stage, s.target, "rls"));
+
+ // "pseudo rule" which represents completely cleaning out the tools dir in
+ // one stage. This needs to happen whenever a dependency changes (e.g.
+ // libstd, libtest, librustc) and all of the tool compilations above will
+ // be sequenced after this rule.
+ rules.build("maybe-clean-tools", "path/to/nowhere")
+ .after("librustc-tool")
+ .after("libtest-tool")
+ .after("libstd-tool");
+
+ rules.build("librustc-tool", "path/to/nowhere")
+ .dep(|s| s.name("librustc"))
+ .run(move |s| compile::maybe_clean_tools(build, s.stage, s.target, Mode::Librustc));
+ rules.build("libtest-tool", "path/to/nowhere")
+ .dep(|s| s.name("libtest"))
+ .run(move |s| compile::maybe_clean_tools(build, s.stage, s.target, Mode::Libtest));
+ rules.build("libstd-tool", "path/to/nowhere")
+ .dep(|s| s.name("libstd"))
+ .run(move |s| compile::maybe_clean_tools(build, s.stage, s.target, Mode::Libstd));
// ========================================================================
// Documentation targets
.dep(|s| s.name("default:doc"))
.run(move |s| dist::docs(build, s.stage, s.target));
rules.dist("dist-analysis", "analysis")
+ .default(build.config.extended)
.dep(|s| s.name("dist-std"))
- .default(true)
.only_host_build(true)
.run(move |s| dist::analysis(build, &s.compiler(), s.target));
+ rules.dist("dist-rls", "rls")
+ .host(true)
+ .only_host_build(true)
+ .dep(|s| s.name("tool-rls"))
+ .run(move |s| dist::rls(build, s.stage, s.target));
rules.dist("install", "path/to/nowhere")
.dep(|s| s.name("default:dist"))
.run(move |s| install::install(build, s.stage, s.target));
.dep(|d| d.name("dist-mingw"))
.dep(|d| d.name("dist-docs"))
.dep(|d| d.name("dist-cargo"))
+ .dep(|d| d.name("dist-rls"))
+ .dep(|d| d.name("dist-analysis"))
.run(move |s| dist::extended(build, s.stage, s.target));
rules.dist("dist-sign", "hash-and-sign")
/// Whether this rule is only for the build triple, not anything in hosts or
/// targets.
only_build: bool,
+
+    /// A list of "order only" dependencies. This rule does not actually
+ /// depend on these rules, but if they show up in the dependency graph then
+ /// this rule must be executed after all these rules.
+ after: Vec<&'a str>,
}
#[derive(PartialEq)]
host: false,
only_host_build: false,
only_build: false,
+ after: Vec::new(),
}
}
}
self
}
+ fn after(&mut self, step: &'a str) -> &mut Self {
+ self.rule.after.push(step);
+ self
+ }
+
fn run<F>(&mut self, f: F) -> &mut Self
where F: Fn(&Step<'a>) + 'a,
{
/// From the top level targets `steps` generate a topological ordering of
/// all steps needed to run those steps.
fn expand(&self, steps: &[Step<'a>]) -> Vec<Step<'a>> {
+ // First up build a graph of steps and their dependencies. The `nodes`
+ // map is a map from step to a unique number. The `edges` map is a
+ // map from these unique numbers to a list of other numbers,
+ // representing dependencies.
+ let mut nodes = HashMap::new();
+ nodes.insert(Step::noop(), 0);
+ let mut edges = HashMap::new();
+ edges.insert(0, HashSet::new());
+ for step in steps {
+ self.build_graph(step.clone(), &mut nodes, &mut edges);
+ }
+
+ // Now that we've built up the actual dependency graph, draw more
+ // dependency edges to satisfy the `after` dependencies field for each
+ // rule.
+ self.satisfy_after_deps(&nodes, &mut edges);
+
+ // And finally, perform a topological sort to return a list of steps to
+ // execute.
let mut order = Vec::new();
- let mut added = HashSet::new();
- added.insert(Step::noop());
- for step in steps.iter().cloned() {
- self.fill(step, &mut order, &mut added);
+ let mut visited = HashSet::new();
+ visited.insert(0);
+ let idx_to_node = nodes.iter().map(|p| (*p.1, p.0)).collect::<HashMap<_, _>>();
+ for idx in 0..nodes.len() {
+ self.topo_sort(idx, &idx_to_node, &edges, &mut visited, &mut order);
}
return order
}
- /// Performs topological sort of dependencies rooted at the `step`
- /// specified, pushing all results onto the `order` vector provided.
- ///
- /// In other words, when this method returns, the `order` vector will
- /// contain a list of steps which if executed in order will eventually
- /// complete the `step` specified as well.
+ /// Builds the dependency graph rooted at `step`.
///
- /// The `added` set specified here is the set of steps that are already
- /// present in `order` (and hence don't need to be added again).
- fn fill(&self,
- step: Step<'a>,
- order: &mut Vec<Step<'a>>,
- added: &mut HashSet<Step<'a>>) {
- if !added.insert(step.clone()) {
- return
+ /// The `nodes` and `edges` maps are filled out according to the rule
+ /// described by `step.name`.
+ fn build_graph(&self,
+ step: Step<'a>,
+ nodes: &mut HashMap<Step<'a>, usize>,
+ edges: &mut HashMap<usize, HashSet<usize>>) -> usize {
+ use std::collections::hash_map::Entry;
+
+ let idx = nodes.len();
+ match nodes.entry(step.clone()) {
+ Entry::Vacant(e) => { e.insert(idx); }
+ Entry::Occupied(e) => return *e.get(),
}
+
+ let mut deps = Vec::new();
for dep in self.rules[step.name].deps.iter() {
let dep = dep(&step);
if dep.name.starts_with("default:") {
let host = self.build.config.host.iter().any(|h| h == dep.target);
let rules = self.rules.values().filter(|r| r.default);
for rule in rules.filter(|r| r.kind == kind && (!r.host || host)) {
- self.fill(dep.name(rule.name), order, added);
+ deps.push(self.build_graph(dep.name(rule.name), nodes, edges));
}
} else {
- self.fill(dep, order, added);
+ deps.push(self.build_graph(dep, nodes, edges));
}
}
- order.push(step);
+
+ edges.entry(idx).or_insert(HashSet::new()).extend(deps);
+ return idx
+ }
+
+ /// Given a dependency graph with a finished list of `nodes`, fill out more
+ /// dependency `edges`.
+ ///
+ /// This is the step which satisfies all `after` listed dependencies in
+ /// `Rule` above.
+ fn satisfy_after_deps(&self,
+ nodes: &HashMap<Step<'a>, usize>,
+ edges: &mut HashMap<usize, HashSet<usize>>) {
+ // Reverse map from the name of a step to the node indices that it
+ // appears at.
+ let mut name_to_idx = HashMap::new();
+ for (step, &idx) in nodes {
+ name_to_idx.entry(step.name).or_insert(Vec::new()).push(idx);
+ }
+
+ for (step, idx) in nodes {
+ if *step == Step::noop() {
+ continue
+ }
+ for after in self.rules[step.name].after.iter() {
+ // This is the critical piece of an `after` dependency. If the
+ // dependency isn't actually in our graph then no edge is drawn,
+ // only if it's already present do we draw the edges.
+ if let Some(idxs) = name_to_idx.get(after) {
+ edges.get_mut(idx).unwrap()
+ .extend(idxs.iter().cloned());
+ }
+ }
+ }
+ }
+
+ fn topo_sort(&self,
+ cur: usize,
+ nodes: &HashMap<usize, &Step<'a>>,
+ edges: &HashMap<usize, HashSet<usize>>,
+ visited: &mut HashSet<usize>,
+ order: &mut Vec<Step<'a>>) {
+ if !visited.insert(cur) {
+ return
+ }
+ for dep in edges[&cur].iter() {
+ self.topo_sort(*dep, nodes, edges, visited, order);
+ }
+ order.push(nodes[&cur].clone());
}
}
ENV STAGING_DIR=/tmp
ENV RUST_CONFIGURE_ARGS \
+ --enable-extended \
--target=$TARGETS \
--musl-root-arm=/usr/local/arm-linux-musleabi \
--musl-root-armhf=/usr/local/arm-linux-musleabihf \
ENV RUST_CONFIGURE_ARGS \
--target=$TARGETS \
+ --enable-extended \
--arm-linux-androideabi-ndk=/android/ndk-arm-9 \
--armv7-linux-androideabi-ndk=/android/ndk-arm-9 \
--i686-linux-android-ndk=/android/ndk-x86-9 \
ENV TARGETS=x86_64-unknown-fuchsia
ENV TARGETS=$TARGETS,aarch64-unknown-fuchsia
-ENV RUST_CONFIGURE_ARGS --target=$TARGETS
+ENV RUST_CONFIGURE_ARGS --target=$TARGETS --enable-extended
ENV SCRIPT python2.7 ../x.py dist --target $TARGETS
ENV RUST_CONFIGURE_ARGS \
--target=i686-unknown-linux-musl,i586-unknown-linux-gnu \
- --musl-root-i686=/musl-i686
+ --musl-root-i686=/musl-i686 \
+ --enable-extended
# Newer binutils broke things on some vms/distros (i.e., linking against
# unknown relocs disabled by the following flag), so we need to go out of our
# to http://vault.centos.org/
RUN sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/fastestmirror.conf
RUN sed -i 's/mirrorlist/#mirrorlist/' /etc/yum.repos.d/*.repo
-RUN sed -i 's/#\(baseurl.*\)mirror.centos.org/\1107.158.252.35/' /etc/yum.repos.d/*.repo
+RUN sed -i 's|#\(baseurl.*\)mirror.centos.org/centos/$releasever|\1vault.centos.org/5.11|' /etc/yum.repos.d/*.repo
RUN yum upgrade -y && yum install -y \
curl \
# to http://vault.centos.org/
RUN sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/fastestmirror.conf
RUN sed -i 's/mirrorlist/#mirrorlist/' /etc/yum.repos.d/*.repo
-RUN sed -i 's/#\(baseurl.*\)mirror.centos.org/\1107.158.252.35/' /etc/yum.repos.d/*.repo
+RUN sed -i 's|#\(baseurl.*\)mirror.centos.org/centos/$releasever|\1vault.centos.org/5.11|' /etc/yum.repos.d/*.repo
RUN yum upgrade -y && yum install -y \
curl \
ENV RUST_CONFIGURE_ARGS \
--target=x86_64-unknown-linux-musl \
- --musl-root-x86_64=/musl-x86_64
+ --musl-root-x86_64=/musl-x86_64 \
+ --enable-extended
# Newer binutils broke things on some vms/distros (i.e., linking against
# unknown relocs disabled by the following flag), so we need to go out of our
args="$args --env AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID"
args="$args --env AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY"
args="$args --env SCCACHE_ERROR_LOG=/tmp/sccache/sccache.log"
- args="$args --env SCCACHE_LOG_LEVEL=debug"
args="$args --volume $objdir/tmp:/tmp/sccache"
else
mkdir -p $HOME/.cache/sccache
rm -rf "$CACHE_DIR"
mkdir "$CACHE_DIR"
else
+ # Ignore errors while gathering information about the possible brokenness
+ # of the git repo since our gathered info will tell us something is wrong
set +o errexit
stat_lines=$(cd "$cache_src_dir" && git status --porcelain | wc -l)
stat_ec=$(cd "$cache_src_dir" && git status >/dev/null 2>&1; echo $?)
if [ "$DEPLOY$DEPLOY_ALT" != "" ]; then
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --release-channel=nightly"
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-llvm-static-stdcpp"
- RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-save-analysis"
if [ "$NO_LLVM_ASSERTIONS" = "1" ]; then
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --disable-llvm-assertions"
-Subproject commit a2c56870d4dc589237102cc5e0fe7b9ebd0d14a1
+Subproject commit beea82b9230cd641dd1ca263cf31025ace4aebb5
[ ':' bound-list ] [ '->' type ]
lifetime-list := lifetime | lifetime ',' lifetime-list
arg-list := ident ':' type | ident ':' type ',' arg-list
-bound-list := bound | bound '+' bound-list
-bound := path | lifetime
```
### Never type
**FIXME:** grammar?
+### Type parameter bounds
+
+```antlr
+bound := ty_bound | lt_bound
+lt_bound := lifetime
+ty_bound := [?] [ for<lt_param_defs> ] simple_path
+bound-list := bound | bound '+' bound-list '+' ?
+```
+
### Self types
**FIXME:** grammar?
-Subproject commit acedc32cacae80cf2f4925753a4ce7f7ffd7c86a
+Subproject commit b060f732145f2fa16df84c74e511df08a3a47c5d
- [future_atomic_orderings](future-atomic-orderings.md)
- [generic_param_attrs](generic-param-attrs.md)
- [get_type_id](get-type-id.md)
+- [global_asm](global_asm.md)
- [heap_api](heap-api.md)
- [i128](i128.md)
- [i128_type](i128-type.md)
- [lookup_host](lookup-host.md)
- [loop_break_value](loop-break-value.md)
- [macro_reexport](macro-reexport.md)
+- [macro_vis_matcher](macro-vis-matcher.md)
- [main](main.md)
+- [manually_drop](manually-drop.md)
- [map_entry_recover_keys](map-entry-recover-keys.md)
- [mpsc_select](mpsc-select.md)
- [n16](n16.md)
- [optin_builtin_traits](optin-builtin-traits.md)
- [option_entry](option-entry.md)
- [osstring_shrink_to_fit](osstring-shrink-to-fit.md)
+- [overlapping_marker_traits](overlapping-marker-traits.md)
- [panic_abort](panic-abort.md)
- [panic_runtime](panic-runtime.md)
- [panic_unwind](panic-unwind.md)
- [thread_local](thread-local.md)
- [thread_local_internals](thread-local-internals.md)
- [thread_local_state](thread-local-state.md)
+- [toowned_clone_into](toowned-clone-into.md)
- [trace_macros](trace-macros.md)
- [trusted_len](trusted-len.md)
- [try_from](try-from.md)
unsafe { libc::malloc(size as libc::size_t) as *mut u8 }
}
+#[no_mangle]
+pub extern fn __rust_allocate_zeroed(size: usize, _align: usize) -> *mut u8 {
+ unsafe { libc::calloc(size as libc::size_t, 1) as *mut u8 }
+}
+
#[no_mangle]
pub extern fn __rust_deallocate(ptr: *mut u8, _old_size: usize, _align: usize) {
unsafe { libc::free(ptr as *mut libc::c_void) }
[llvm-docs]: http://llvm.org/docs/LangRef.html#inline-assembler-expressions
+If you need more power and don't mind losing some of the niceties of
+`asm!`, check out [global_asm](global_asm.html).
--- /dev/null
+# `global_asm`
+
+The tracking issue for this feature is: [#35119]
+
+[#35119]: https://github.com/rust-lang/rust/issues/35119
+
+------------------------
+
+The `global_asm!` macro allows the programmer to write arbitrary
+assembly outside the scope of a function body, passing it through
+`rustc` and `llvm` to the assembler. The macro is a no-frills
+interface to LLVM's concept of [module-level inline assembly]. That is,
+all caveats applicable to LLVM's module-level inline assembly apply
+to `global_asm!`.
+
+[module-level inline assembly]: http://llvm.org/docs/LangRef.html#module-level-inline-assembly
+
+`global_asm!` fills a role not currently satisfied by either `asm!`
+or `#[naked]` functions. The programmer has _all_ features of the
+assembler at their disposal. The linker will expect to resolve any
+symbols defined in the inline assembly, modulo any symbols marked as
+external. It also means that the syntax for directives and assembly follows
+the conventions of the assembler in your toolchain.
+
+A simple usage looks like this:
+
+```rust,ignore
+# #![feature(global_asm)]
+# you also need relevant target_arch cfgs
+global_asm!(include_str!("something_neato.s"));
+```
+
+And a more complicated usage looks like this:
+
+```rust,ignore
+# #![feature(global_asm)]
+# #![cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+
+pub mod sally {
+ global_asm!(r#"
+ .global foo
+ foo:
+ jmp baz
+ "#);
+
+ #[no_mangle]
+ pub unsafe extern "C" fn baz() {}
+}
+
+// the symbols `foo` and `bar` are global, no matter where
+// `global_asm!` was used.
+extern "C" {
+ fn foo();
+ fn bar();
+}
+
+pub mod harry {
+ global_asm!(r#"
+ .global bar
+ bar:
+ jmp quux
+ "#);
+
+ #[no_mangle]
+ pub unsafe extern "C" fn quux() {}
+}
+```
+
+You may use `global_asm!` multiple times, anywhere in your crate, in
+whatever way suits you. The effect is as if you concatenated all
+usages and placed the larger, single usage in the crate root.
+
+------------------------
+
+If you don't need quite as much power and flexibility as
+`global_asm!` provides, and you don't mind restricting your inline
+assembly to `fn` bodies only, you might try the [asm](asm.html)
+feature instead.
--- /dev/null
+# `macro_vis_matcher`
+
+The tracking issue for this feature is: [#41022]
+
+With this feature gate enabled, the [list of fragment specifiers][frags] gains one more entry:
+
+* `vis`: a visibility qualifier. Examples: nothing (default visibility); `pub`; `pub(crate)`.
+
+A `vis` variable may be followed by a comma, ident, type, or path.
+
+[#41022]: https://github.com/rust-lang/rust/issues/41022
+[frags]: ../book/first-edition/macros.html#syntactic-requirements
+
+------------------------
--- /dev/null
+# `overlapping_marker_traits`
+
+The tracking issue for this feature is: [#29864]
+
+[#29864]: https://github.com/rust-lang/rust/issues/29864
+
+------------------------
--- /dev/null
+# `toowned_clone_into`
+
+The tracking issue for this feature is: [#41263]
+
+[#41263]: https://github.com/rust-lang/rust/issues/41263
+
+------------------------
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
-/// A weak version of [`Arc`][arc].
+/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
+/// managed value. The value is accessed by calling [`upgrade`] on the `Weak`
+/// pointer, which returns an [`Option`]`<`[`Arc`]`<T>>`.
///
-/// `Weak` pointers do not count towards determining if the inner value
-/// should be dropped.
+/// Since a `Weak` reference does not count towards ownership, it will not
+/// prevent the inner value from being dropped, and `Weak` itself makes no
+/// guarantees about the value still being present and may return [`None`]
+/// when [`upgrade`]d.
///
-/// The typical way to obtain a `Weak` pointer is to call
-/// [`Arc::downgrade`][downgrade].
+/// A `Weak` pointer is useful for keeping a temporary reference to the value
+/// within [`Arc`] without extending its lifetime. It is also used to prevent
+/// circular references between [`Arc`] pointers, since mutual owning references
+/// would never allow either [`Arc`] to be dropped. For example, a tree could
+/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
+/// pointers from children back to their parents.
///
-/// See the [`Arc`][arc] documentation for more details.
+/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
///
-/// [arc]: struct.Arc.html
-/// [downgrade]: struct.Arc.html#method.downgrade
+/// [`Arc`]: struct.Arc.html
+/// [`Arc::downgrade`]: struct.Arc.html#method.downgrade
+/// [`upgrade`]: struct.Weak.html#method.upgrade
+/// [`Option`]: ../../std/option/enum.Option.html
+/// [`None`]: ../../std/option/enum.Option.html#variant.None
#[stable(feature = "arc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
ptr: Shared<ArcInner<T>>,
}
impl<T> Weak<T> {
- /// Constructs a new `Weak<T>`, without an accompanying instance of `T`.
- ///
- /// This allocates memory for `T`, but does not initialize it. Calling
- /// [`upgrade`][upgrade] on the return value always gives
- /// [`None`][option].
+ /// Constructs a new `Weak<T>`, allocating memory for `T` without initializing
+ /// it. Calling [`upgrade`] on the return value always gives [`None`].
///
- /// [upgrade]: struct.Weak.html#method.upgrade
- /// [option]: ../../std/option/enum.Option.html
+ /// [`upgrade`]: struct.Weak.html#method.upgrade
+ /// [`None`]: ../../std/option/enum.Option.html#variant.None
///
/// # Examples
///
}
impl<T: ?Sized> Weak<T> {
- /// Upgrades the `Weak` pointer to an [`Arc`][arc], if possible.
+ /// Attempts to upgrade the `Weak` pointer to an [`Arc`], extending
+ /// the lifetime of the value if successful.
///
- /// Returns [`None`][option] if the strong count has reached zero and the
- /// inner value was destroyed.
+ /// Returns [`None`] if the value has since been dropped.
///
- /// [arc]: struct.Arc.html
- /// [option]: ../../std/option/enum.Option.html
+ /// [`Arc`]: struct.Arc.html
+ /// [`None`]: ../../std/option/enum.Option.html#variant.None
///
/// # Examples
///
#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> Clone for Weak<T> {
- /// Makes a clone of the `Weak` pointer.
- ///
- /// This creates another pointer to the same inner value, increasing the
- /// weak reference count.
+ /// Makes a clone of the `Weak` pointer that points to the same value.
///
/// # Examples
///
#[stable(feature = "downgraded_weak", since = "1.10.0")]
impl<T> Default for Weak<T> {
- /// Constructs a new `Weak<T>`, without an accompanying instance of `T`.
+ /// Constructs a new `Weak<T>`, allocating memory for `T` without initializing
+ /// it. Calling [`upgrade`] on the return value always gives [`None`].
///
- /// This allocates memory for `T`, but does not initialize it. Calling
- /// [`upgrade`][upgrade] on the return value always gives
- /// [`None`][option].
- ///
- /// [upgrade]: struct.Weak.html#method.upgrade
- /// [option]: ../../std/option/enum.Option.html
+ /// [`upgrade`]: struct.Weak.html#method.upgrade
+ /// [`None`]: ../../std/option/enum.Option.html#variant.None
///
/// # Examples
///
impl<T: ?Sized> Drop for Weak<T> {
/// Drops the `Weak` pointer.
///
- /// This will decrement the weak reference count.
- ///
/// # Examples
///
/// ```
extern "C" {
#[allocator]
fn __rust_allocate(size: usize, align: usize) -> *mut u8;
+ fn __rust_allocate_zeroed(size: usize, align: usize) -> *mut u8;
fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize);
fn __rust_reallocate(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8;
fn __rust_reallocate_inplace(ptr: *mut u8,
__rust_allocate(size, align)
}
+/// Return a pointer to `size` bytes of memory aligned to `align` and
+/// initialized to zeroes.
+///
+/// On failure, return a null pointer.
+///
+/// Behavior is undefined if the requested size is 0 or the alignment is not a
+/// power of 2. The alignment must be no larger than the largest supported page
+/// size on the platform.
+#[inline]
+pub unsafe fn allocate_zeroed(size: usize, align: usize) -> *mut u8 {
+ check_size_and_alignment(size, align);
+ __rust_allocate_zeroed(size, align)
+}
+
/// Resize the allocation referenced by `ptr` to `size` bytes.
///
/// On failure, return a null pointer and leave the original allocation intact.
use boxed::Box;
use heap;
+ #[test]
+ fn allocate_zeroed() {
+ unsafe {
+ let size = 1024;
+ let ptr = heap::allocate_zeroed(size, 1);
+ if ptr.is_null() {
+ ::oom()
+ }
+
+ let end = ptr.offset(size as isize);
+ let mut i = ptr;
+ while i < end {
+ assert_eq!(*i, 0);
+ i = i.offset(1);
+ }
+ heap::deallocate(ptr, size, 1);
+ }
+ }
+
#[test]
fn basic_reallocate_inplace_noop() {
unsafe {
/// # Aborts
///
/// Aborts on OOM
+ #[inline]
pub fn with_capacity(cap: usize) -> Self {
+ RawVec::allocate(cap, false)
+ }
+
+ /// Like `with_capacity` but guarantees the buffer is zeroed.
+ #[inline]
+ pub fn with_capacity_zeroed(cap: usize) -> Self {
+ RawVec::allocate(cap, true)
+ }
+
+ fn allocate(cap: usize, zeroed: bool) -> Self {
unsafe {
let elem_size = mem::size_of::<T>();
heap::EMPTY as *mut u8
} else {
let align = mem::align_of::<T>();
- let ptr = heap::allocate(alloc_size, align);
+ let ptr = if zeroed {
+ heap::allocate_zeroed(alloc_size, align)
+ } else {
+ heap::allocate(alloc_size, align)
+ };
if ptr.is_null() {
oom()
}
}
}
-/// A weak version of [`Rc`][rc].
+/// `Weak` is a version of [`Rc`] that holds a non-owning reference to the
+/// managed value. The value is accessed by calling [`upgrade`] on the `Weak`
+/// pointer, which returns an [`Option`]`<`[`Rc`]`<T>>`.
///
-/// `Weak` pointers do not count towards determining if the inner value
-/// should be dropped.
+/// Since a `Weak` reference does not count towards ownership, it will not
+/// prevent the inner value from being dropped, and `Weak` itself makes no
+/// guarantees about the value still being present and may return [`None`]
+/// when [`upgrade`]d.
///
-/// The typical way to obtain a `Weak` pointer is to call
-/// [`Rc::downgrade`][downgrade].
+/// A `Weak` pointer is useful for keeping a temporary reference to the value
+/// within [`Rc`] without extending its lifetime. It is also used to prevent
+/// circular references between [`Rc`] pointers, since mutual owning references
+/// would never allow either [`Rc`] to be dropped. For example, a tree could
+/// have strong [`Rc`] pointers from parent nodes to children, and `Weak`
+/// pointers from children back to their parents.
///
-/// See the [module-level documentation](./index.html) for more details.
+/// The typical way to obtain a `Weak` pointer is to call [`Rc::downgrade`].
///
-/// [rc]: struct.Rc.html
-/// [downgrade]: struct.Rc.html#method.downgrade
+/// [`Rc`]: struct.Rc.html
+/// [`Rc::downgrade`]: struct.Rc.html#method.downgrade
+/// [`upgrade`]: struct.Weak.html#method.upgrade
+/// [`Option`]: ../../std/option/enum.Option.html
+/// [`None`]: ../../std/option/enum.Option.html#variant.None
#[stable(feature = "rc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
ptr: Shared<RcBox<T>>,
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
impl<T> Weak<T> {
- /// Constructs a new `Weak<T>`, without an accompanying instance of `T`.
- ///
- /// This allocates memory for `T`, but does not initialize it. Calling
- /// [`upgrade`][upgrade] on the return value always gives
- /// [`None`][option].
+ /// Constructs a new `Weak<T>`, allocating memory for `T` without initializing
+ /// it. Calling [`upgrade`] on the return value always gives [`None`].
///
- /// [upgrade]: struct.Weak.html#method.upgrade
- /// [option]: ../../std/option/enum.Option.html
+ /// [`upgrade`]: struct.Weak.html#method.upgrade
+    /// [`None`]: ../../std/option/enum.Option.html#variant.None
///
/// # Examples
///
}
impl<T: ?Sized> Weak<T> {
- /// Upgrades the `Weak` pointer to an [`Rc`][rc], if possible.
+ /// Attempts to upgrade the `Weak` pointer to an [`Rc`], extending
+ /// the lifetime of the value if successful.
///
- /// Returns [`None`][option] if the strong count has reached zero and the
- /// inner value was destroyed.
+ /// Returns [`None`] if the value has since been dropped.
///
- /// [rc]: struct.Rc.html
- /// [option]: ../../std/option/enum.Option.html
+ /// [`Rc`]: struct.Rc.html
+    /// [`None`]: ../../std/option/enum.Option.html#variant.None
///
/// # Examples
///
impl<T: ?Sized> Drop for Weak<T> {
/// Drops the `Weak` pointer.
///
- /// This will decrement the weak reference count.
- ///
/// # Examples
///
/// ```
#[stable(feature = "rc_weak", since = "1.4.0")]
impl<T: ?Sized> Clone for Weak<T> {
- /// Makes a clone of the `Weak` pointer.
- ///
- /// This creates another pointer to the same inner value, increasing the
- /// weak reference count.
+ /// Makes a clone of the `Weak` pointer that points to the same value.
///
/// # Examples
///
#[stable(feature = "downgraded_weak", since = "1.10.0")]
impl<T> Default for Weak<T> {
- /// Constructs a new `Weak<T>`, without an accompanying instance of `T`.
- ///
- /// This allocates memory for `T`, but does not initialize it. Calling
- /// [`upgrade`][upgrade] on the return value always gives
- /// [`None`][option].
+ /// Constructs a new `Weak<T>`, allocating memory for `T` without initializing
+ /// it. Calling [`upgrade`] on the return value always gives [`None`].
///
- /// [upgrade]: struct.Weak.html#method.upgrade
- /// [option]: ../../std/option/enum.Option.html
+ /// [`upgrade`]: struct.Weak.html#method.upgrade
+    /// [`None`]: ../../std/option/enum.Option.html#variant.None
///
/// # Examples
///
target_os = "dragonfly", target_os = "windows", target_env = "musl"),
link_name = "je_mallocx")]
fn mallocx(size: size_t, flags: c_int) -> *mut c_void;
+ #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
+ target_os = "dragonfly", target_os = "windows", target_env = "musl"),
+ link_name = "je_calloc")]
+ fn calloc(size: size_t, flags: c_int) -> *mut c_void;
#[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
target_os = "dragonfly", target_os = "windows", target_env = "musl"),
link_name = "je_rallocx")]
fn nallocx(size: size_t, flags: c_int) -> size_t;
}
+ const MALLOCX_ZERO: c_int = 0x40;
+
// The minimum alignment guaranteed by the architecture. This value is used to
// add fast paths for low alignment values. In practice, the alignment is a
// constant at the call site and the branch will be optimized out.
unsafe { mallocx(size as size_t, flags) as *mut u8 }
}
+ #[no_mangle]
+ pub extern "C" fn __rust_allocate_zeroed(size: usize, align: usize) -> *mut u8 {
+ if align <= MIN_ALIGN {
+ unsafe { calloc(size as size_t, 1) as *mut u8 }
+ } else {
+ let flags = align_to_flags(align) | MALLOCX_ZERO;
+ unsafe { mallocx(size as size_t, flags) as *mut u8 }
+ }
+ }
+
#[no_mangle]
pub extern "C" fn __rust_reallocate(ptr: *mut u8,
_old_size: usize,
bogus()
}
+ #[no_mangle]
+ pub extern "C" fn __rust_allocate_zeroed(_size: usize, _align: usize) -> *mut u8 {
+ bogus()
+ }
+
#[no_mangle]
pub extern "C" fn __rust_reallocate(_ptr: *mut u8,
_old_size: usize,
unsafe { imp::allocate(size, align) }
}
+#[no_mangle]
+pub extern "C" fn __rust_allocate_zeroed(size: usize, align: usize) -> *mut u8 {
+ unsafe { imp::allocate_zeroed(size, align) }
+}
+
#[no_mangle]
pub extern "C" fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize) {
unsafe { imp::deallocate(ptr, old_size, align) }
}
}
+ pub unsafe fn allocate_zeroed(size: usize, align: usize) -> *mut u8 {
+ if align <= MIN_ALIGN {
+ libc::calloc(size as libc::size_t, 1) as *mut u8
+ } else {
+ let ptr = aligned_malloc(size, align);
+ if !ptr.is_null() {
+ ptr::write_bytes(ptr, 0, size);
+ }
+ ptr
+ }
+ }
+
pub unsafe fn reallocate(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
if align <= MIN_ALIGN {
libc::realloc(ptr as *mut libc::c_void, size as libc::size_t) as *mut u8
#[repr(C)]
struct Header(*mut u8);
+
+ const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
const HEAP_REALLOC_IN_PLACE_ONLY: DWORD = 0x00000010;
unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
aligned
}
- pub unsafe fn allocate(size: usize, align: usize) -> *mut u8 {
+ #[inline]
+ unsafe fn allocate_with_flags(size: usize, align: usize, flags: DWORD) -> *mut u8 {
if align <= MIN_ALIGN {
- HeapAlloc(GetProcessHeap(), 0, size as SIZE_T) as *mut u8
+ HeapAlloc(GetProcessHeap(), flags, size as SIZE_T) as *mut u8
} else {
- let ptr = HeapAlloc(GetProcessHeap(), 0, (size + align) as SIZE_T) as *mut u8;
+ let ptr = HeapAlloc(GetProcessHeap(), flags, (size + align) as SIZE_T) as *mut u8;
if ptr.is_null() {
return ptr;
}
}
}
+ pub unsafe fn allocate(size: usize, align: usize) -> *mut u8 {
+ allocate_with_flags(size, align, 0)
+ }
+
+ pub unsafe fn allocate_zeroed(size: usize, align: usize) -> *mut u8 {
+ allocate_with_flags(size, align, HEAP_ZERO_MEMORY)
+ }
+
pub unsafe fn reallocate(ptr: *mut u8, _old_size: usize, size: usize, align: usize) -> *mut u8 {
if align <= MIN_ALIGN {
HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, size as SIZE_T) as *mut u8
//!
//! This is a larger example that implements [Dijkstra's algorithm][dijkstra]
//! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph].
-//! It shows how to use `BinaryHeap` with custom types.
+//! It shows how to use [`BinaryHeap`] with custom types.
//!
//! [dijkstra]: http://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
//! [sssp]: http://en.wikipedia.org/wiki/Shortest_path_problem
//! [dir_graph]: http://en.wikipedia.org/wiki/Directed_graph
+//! [`BinaryHeap`]: struct.BinaryHeap.html
//!
//! ```
//! use std::cmp::Ordering;
data: Vec<T>,
}
-/// A container object that represents the result of the [`peek_mut`] method
-/// on `BinaryHeap`. See its documentation for details.
+/// Structure wrapping a mutable reference to the greatest item on a
+/// `BinaryHeap`.
+///
+/// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. See
+/// its documentation for more.
///
/// [`peek_mut`]: struct.BinaryHeap.html#method.peek_mut
+/// [`BinaryHeap`]: struct.BinaryHeap.html
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
pub struct PeekMut<'a, T: 'a + Ord> {
heap: &'a mut BinaryHeap<T>,
/// given `BinaryHeap`. Does nothing if the capacity is already sufficient.
///
/// Note that the allocator may give the collection more space than it requests. Therefore
- /// capacity can not be relied upon to be precisely minimal. Prefer `reserve` if future
+ /// capacity can not be relied upon to be precisely minimal. Prefer [`reserve`] if future
/// insertions are expected.
///
/// # Panics
/// assert!(heap.capacity() >= 100);
/// heap.push(4);
/// ```
+ ///
+ /// [`reserve`]: #method.reserve
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve_exact(&mut self, additional: usize) {
self.data.reserve_exact(additional);
}
}
-/// `BinaryHeap` iterator.
+/// An iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by the [`iter`] method on [`BinaryHeap`]. See its
+/// documentation for more.
+///
+/// [`iter`]: struct.BinaryHeap.html#method.iter
+/// [`BinaryHeap`]: struct.BinaryHeap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
iter: slice::Iter<'a, T>,
#[unstable(feature = "fused", issue = "35602")]
impl<'a, T> FusedIterator for Iter<'a, T> {}
-/// An iterator that moves out of a `BinaryHeap`.
+/// An owning iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`BinaryHeap`]
+/// (provided by the `IntoIterator` trait). See its documentation for more.
+///
+/// [`into_iter`]: struct.BinaryHeap.html#method.into_iter
+/// [`BinaryHeap`]: struct.BinaryHeap.html
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct IntoIter<T> {
#[unstable(feature = "fused", issue = "35602")]
impl<T> FusedIterator for IntoIter<T> {}
-/// An iterator that drains a `BinaryHeap`.
+/// A draining iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by the [`drain`] method on [`BinaryHeap`]. See its
+/// documentation for more.
+///
+/// [`drain`]: struct.BinaryHeap.html#method.drain
+/// [`BinaryHeap`]: struct.BinaryHeap.html
#[stable(feature = "drain", since = "1.6.0")]
#[derive(Debug)]
pub struct Drain<'a, T: 'a> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn to_owned(&self) -> Self::Owned;
+
+ /// Uses borrowed data to replace owned data, usually by cloning.
+ ///
+    /// This is a borrow-generalized version of `Clone::clone_from`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(toowned_clone_into)]
+ /// let mut s: String = String::new();
+ /// "hello".clone_into(&mut s);
+ ///
+ /// let mut v: Vec<i32> = Vec::new();
+ /// [1, 2][..].clone_into(&mut v);
+ /// ```
+ #[unstable(feature = "toowned_clone_into",
+ reason = "recently added",
+ issue = "41263")]
+ fn clone_into(&self, target: &mut Self::Owned) {
+ *target = self.to_owned();
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
fn to_owned(&self) -> T {
self.clone()
}
+
+ fn clone_into(&self, target: &mut T) {
+ target.clone_from(self);
+ }
}
/// A clone-on-write smart pointer.
}
}
}
+
+ fn clone_from(&mut self, source: &Cow<'a, B>) {
+ if let Owned(ref mut dest) = *self {
+ if let Owned(ref o) = *source {
+ o.borrow().clone_into(dest);
+ return;
+ }
+ }
+
+ *self = source.clone();
+ }
}
impl<'a, B: ?Sized> Cow<'a, B>
}
}
-/// An iterator over a `BTreeMap`'s entries.
+/// An iterator over the entries of a `BTreeMap`.
+///
+/// This `struct` is created by the [`iter`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`iter`]: struct.BTreeMap.html#method.iter
+/// [`BTreeMap`]: struct.BTreeMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, K: 'a, V: 'a> {
range: Range<'a, K, V>,
}
}
-/// A mutable iterator over a `BTreeMap`'s entries.
+/// A mutable iterator over the entries of a `BTreeMap`.
+///
+/// This `struct` is created by the [`iter_mut`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`iter_mut`]: struct.BTreeMap.html#method.iter_mut
+/// [`BTreeMap`]: struct.BTreeMap.html
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct IterMut<'a, K: 'a, V: 'a> {
length: usize,
}
-/// An owning iterator over a `BTreeMap`'s entries.
+/// An owning iterator over the entries of a `BTreeMap`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`BTreeMap`]
+/// (provided by the `IntoIterator` trait). See its documentation for more.
+///
+/// [`into_iter`]: struct.BTreeMap.html#method.into_iter
+/// [`BTreeMap`]: struct.BTreeMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<K, V> {
front: Handle<NodeRef<marker::Owned, K, V, marker::Leaf>, marker::Edge>,
}
}
-/// An iterator over a `BTreeMap`'s keys.
+/// An iterator over the keys of a `BTreeMap`.
+///
+/// This `struct` is created by the [`keys`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`keys`]: struct.BTreeMap.html#method.keys
+/// [`BTreeMap`]: struct.BTreeMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Keys<'a, K: 'a, V: 'a> {
inner: Iter<'a, K, V>,
}
}
-/// An iterator over a `BTreeMap`'s values.
+/// An iterator over the values of a `BTreeMap`.
+///
+/// This `struct` is created by the [`values`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`values`]: struct.BTreeMap.html#method.values
+/// [`BTreeMap`]: struct.BTreeMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Values<'a, K: 'a, V: 'a> {
inner: Iter<'a, K, V>,
}
}
-/// A mutable iterator over a `BTreeMap`'s values.
+/// A mutable iterator over the values of a `BTreeMap`.
+///
+/// This `struct` is created by the [`values_mut`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`values_mut`]: struct.BTreeMap.html#method.values_mut
+/// [`BTreeMap`]: struct.BTreeMap.html
#[stable(feature = "map_values_mut", since = "1.10.0")]
#[derive(Debug)]
pub struct ValuesMut<'a, K: 'a, V: 'a> {
inner: IterMut<'a, K, V>,
}
-/// An iterator over a sub-range of `BTreeMap`'s entries.
+/// An iterator over a sub-range of entries in a `BTreeMap`.
+///
+/// This `struct` is created by the [`range`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`range`]: struct.BTreeMap.html#method.range
+/// [`BTreeMap`]: struct.BTreeMap.html
#[stable(feature = "btree_range", since = "1.17.0")]
pub struct Range<'a, K: 'a, V: 'a> {
front: Handle<NodeRef<marker::Immut<'a>, K, V, marker::Leaf>, marker::Edge>,
}
}
-/// A mutable iterator over a sub-range of `BTreeMap`'s entries.
+/// A mutable iterator over a sub-range of entries in a `BTreeMap`.
+///
+/// This `struct` is created by the [`range_mut`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`range_mut`]: struct.BTreeMap.html#method.range_mut
+/// [`BTreeMap`]: struct.BTreeMap.html
#[stable(feature = "btree_range", since = "1.17.0")]
pub struct RangeMut<'a, K: 'a, V: 'a> {
front: Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>,
}
/// A view into a single entry in a map, which may either be vacant or occupied.
-/// This enum is constructed from the [`entry`] method on [`BTreeMap`].
+///
+/// This `enum` is constructed from the [`entry`] method on [`BTreeMap`].
///
/// [`BTreeMap`]: struct.BTreeMap.html
/// [`entry`]: struct.BTreeMap.html#method.entry
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Entry<'a, K: 'a, V: 'a> {
- /// A vacant `Entry`
+ /// A vacant entry.
#[stable(feature = "rust1", since = "1.0.0")]
Vacant(#[stable(feature = "rust1", since = "1.0.0")]
VacantEntry<'a, K, V>),
- /// An occupied `Entry`
+ /// An occupied entry.
#[stable(feature = "rust1", since = "1.0.0")]
Occupied(#[stable(feature = "rust1", since = "1.0.0")]
OccupiedEntry<'a, K, V>),
}
}
-/// A vacant `Entry`. It is part of the [`Entry`] enum.
+/// A view into a vacant entry in a `BTreeMap`.
+/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
#[stable(feature = "rust1", since = "1.0.0")]
}
}
-/// An occupied `Entry`. It is part of the [`Entry`] enum.
+/// A view into an occupied entry in a `BTreeMap`.
+/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
#[stable(feature = "rust1", since = "1.0.0")]
map: BTreeMap<T, ()>,
}
-/// An iterator over a `BTreeSet`'s items.
+/// An iterator over the items of a `BTreeSet`.
///
-/// This structure is created by the [`iter`] method on [`BTreeSet`].
+/// This `struct` is created by the [`iter`] method on [`BTreeSet`].
+/// See its documentation for more.
///
/// [`BTreeSet`]: struct.BTreeSet.html
/// [`iter`]: struct.BTreeSet.html#method.iter
}
}
-/// An owning iterator over a `BTreeSet`'s items.
+/// An owning iterator over the items of a `BTreeSet`.
///
-/// This structure is created by the `into_iter` method on [`BTreeSet`]
-/// [`BTreeSet`] (provided by the `IntoIterator` trait).
+/// This `struct` is created by the [`into_iter`] method on [`BTreeSet`]
+/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`BTreeSet`]: struct.BTreeSet.html
+/// [`into_iter`]: struct.BTreeSet.html#method.into_iter
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct IntoIter<T> {
iter: ::btree_map::IntoIter<T, ()>,
}
-/// An iterator over a sub-range of `BTreeSet`'s items.
+/// An iterator over a sub-range of items in a `BTreeSet`.
///
-/// This structure is created by the [`range`] method on [`BTreeSet`].
+/// This `struct` is created by the [`range`] method on [`BTreeSet`].
+/// See its documentation for more.
///
/// [`BTreeSet`]: struct.BTreeSet.html
/// [`range`]: struct.BTreeSet.html#method.range
iter: ::btree_map::Range<'a, T, ()>,
}
-/// A lazy iterator producing elements in the set difference (in-order).
+/// A lazy iterator producing elements in the difference of `BTreeSet`s.
///
-/// This structure is created by the [`difference`] method on [`BTreeSet`].
+/// This `struct` is created by the [`difference`] method on [`BTreeSet`].
+/// See its documentation for more.
///
/// [`BTreeSet`]: struct.BTreeSet.html
/// [`difference`]: struct.BTreeSet.html#method.difference
}
}
-/// A lazy iterator producing elements in the set symmetric difference (in-order).
+/// A lazy iterator producing elements in the symmetric difference of `BTreeSet`s.
///
-/// This structure is created by the [`symmetric_difference`] method on
-/// [`BTreeSet`].
+/// This `struct` is created by the [`symmetric_difference`] method on
+/// [`BTreeSet`]. See its documentation for more.
///
/// [`BTreeSet`]: struct.BTreeSet.html
/// [`symmetric_difference`]: struct.BTreeSet.html#method.symmetric_difference
}
}
-/// A lazy iterator producing elements in the set intersection (in-order).
+/// A lazy iterator producing elements in the intersection of `BTreeSet`s.
///
-/// This structure is created by the [`intersection`] method on [`BTreeSet`].
+/// This `struct` is created by the [`intersection`] method on [`BTreeSet`].
+/// See its documentation for more.
///
/// [`BTreeSet`]: struct.BTreeSet.html
/// [`intersection`]: struct.BTreeSet.html#method.intersection
}
}
-/// A lazy iterator producing elements in the set union (in-order).
+/// A lazy iterator producing elements in the union of `BTreeSet`s.
///
-/// This structure is created by the [`union`] method on [`BTreeSet`].
+/// This `struct` is created by the [`union`] method on [`BTreeSet`].
+/// See its documentation for more.
///
/// [`BTreeSet`]: struct.BTreeSet.html
/// [`union`]: struct.BTreeSet.html#method.union
type Item = T;
type IntoIter = IntoIter<T>;
- /// Gets an iterator for moving out the BtreeSet's contents.
+ /// Gets an iterator for moving out the `BTreeSet`'s contents.
///
/// # Examples
///
#![feature(box_patterns)]
#![feature(box_syntax)]
#![cfg_attr(not(test), feature(char_escape_debug))]
+#![cfg_attr(not(test), feature(core_float))]
#![feature(core_intrinsics)]
#![feature(dropck_eyepatch)]
#![feature(exact_size_is_empty)]
#![feature(fused)]
#![feature(generic_param_attrs)]
#![feature(heap_api)]
+#![feature(i128_type)]
#![feature(inclusive_range)]
#![feature(lang_items)]
+#![feature(manually_drop)]
#![feature(nonzero)]
#![feature(pattern)]
#![feature(placement_in)]
}
/// An endpoint of a range of keys.
+///
+/// # Examples
+///
+/// `Bound`s are range endpoints:
+///
+/// ```
+/// #![feature(collections_range)]
+///
+/// use std::collections::range::RangeArgument;
+/// use std::collections::Bound::*;
+///
+/// assert_eq!((..100).start(), Unbounded);
+/// assert_eq!((1..12).start(), Included(&1));
+/// assert_eq!((1..12).end(), Excluded(&12));
+/// ```
+///
+/// Using a tuple of `Bound`s as an argument to [`BTreeMap::range`].
+/// Note that in most cases, it's better to use range syntax (`1..5`) instead.
+///
+/// ```
+/// use std::collections::BTreeMap;
+/// use std::collections::Bound::{Excluded, Included, Unbounded};
+///
+/// let mut map = BTreeMap::new();
+/// map.insert(3, "a");
+/// map.insert(5, "b");
+/// map.insert(8, "c");
+///
+/// for (key, value) in map.range((Excluded(3), Included(8))) {
+/// println!("{}: {}", key, value);
+/// }
+///
+/// assert_eq!(Some((&3, &"a")), map.range((Unbounded, Included(5))).next());
+/// ```
+///
+/// [`BTreeMap::range`]: btree_map/struct.BTreeMap.html#method.range
#[stable(feature = "collections_bound", since = "1.17.0")]
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub enum Bound<T> {
element: T,
}
-/// An iterator over references to the elements of a `LinkedList`.
+/// An iterator over the elements of a `LinkedList`.
+///
+/// This `struct` is created by the [`iter`] method on [`LinkedList`]. See its
+/// documentation for more.
+///
+/// [`iter`]: struct.LinkedList.html#method.iter
+/// [`LinkedList`]: struct.LinkedList.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
head: Option<Shared<Node<T>>>,
}
}
-/// An iterator over mutable references to the elements of a `LinkedList`.
+/// A mutable iterator over the elements of a `LinkedList`.
+///
+/// This `struct` is created by the [`iter_mut`] method on [`LinkedList`]. See its
+/// documentation for more.
+///
+/// [`iter_mut`]: struct.LinkedList.html#method.iter_mut
+/// [`LinkedList`]: struct.LinkedList.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
list: &'a mut LinkedList<T>,
}
}
-/// An iterator over the elements of a `LinkedList`.
+/// An owning iterator over the elements of a `LinkedList`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`LinkedList`]
+/// (provided by the `IntoIterator` trait). See its documentation for more.
+///
+/// [`into_iter`]: struct.LinkedList.html#method.into_iter
+/// [`LinkedList`]: struct.LinkedList.html
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<T> {
/// Splits the list into two at the given index. Returns everything after the given index,
/// including the index.
///
+ /// This operation should compute in O(n) time.
+ ///
/// # Panics
///
/// Panics if `at > len`.
///
- /// This operation should compute in O(n) time.
- ///
/// # Examples
///
/// ```
fn to_owned(&self) -> Vec<T> {
panic!("not available with cfg(test)")
}
+
+ fn clone_into(&self, target: &mut Vec<T>) {
+ // drop anything in target that will not be overwritten
+ target.truncate(self.len());
+ let len = target.len();
+
+ // reuse the contained values' allocations/resources.
+ target.clone_from_slice(&self[..len]);
+
+ // target.len <= self.len due to the truncate above, so the
+ // slice here is always in-bounds.
+ target.extend_from_slice(&self[len..]);
+ }
}
////////////////////////////////////////////////////////////////////////////////
// performance than with the 2nd method.
//
// All methods were benchmarked, and the 3rd showed best results. So we chose that one.
- let mut tmp = NoDrop { value: ptr::read(&v[0]) };
+ let mut tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
// Intermediate state of the insertion process is always tracked by `hole`, which
// serves two purposes:
// fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
// initially held exactly once.
let mut hole = InsertionHole {
- src: &mut tmp.value,
+ src: &mut *tmp,
dest: &mut v[1],
};
ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
for i in 2..v.len() {
- if !is_less(&v[i], &tmp.value) {
+ if !is_less(&v[i], &*tmp) {
break;
}
ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
}
}
- // Holds a value, but never drops it.
- #[allow(unions_with_drop_fields)]
- union NoDrop<T> {
- value: T
- }
-
// When dropped, copies from `src` into `dest`.
struct InsertionHole<T> {
src: *mut T,
fn to_owned(&self) -> String {
unsafe { String::from_utf8_unchecked(self.as_bytes().to_owned()) }
}
+
+ fn clone_into(&self, target: &mut String) {
+ let mut b = mem::replace(target, String::new()).into_bytes();
+ self.as_bytes().clone_into(&mut b);
+ *target = unsafe { String::from_utf8_unchecked(b) }
+ }
}
/// Methods for string slices.
/// Returns a subslice of `str`.
///
- /// This is the non-panicking alternative to indexing the `str`. Returns `None` whenever
- /// equivalent indexing operation would panic.
+ /// This is the non-panicking alternative to indexing the `str`. Returns
+    /// [`None`] whenever the equivalent indexing operation would panic.
+ ///
+ /// [`None`]: option/enum.Option.html#variant.None
///
/// # Examples
///
/// Returns a mutable subslice of `str`.
///
- /// This is the non-panicking alternative to indexing the `str`. Returns `None` whenever
- /// equivalent indexing operation would panic.
+ /// This is the non-panicking alternative to indexing the `str`. Returns
+    /// [`None`] whenever the equivalent indexing operation would panic.
+ ///
+ /// [`None`]: option/enum.Option.html#variant.None
///
/// # Examples
///
core_str::StrExt::split_at_mut(self, mid)
}
- /// Returns an iterator over the `char`s of a string slice.
+ /// Returns an iterator over the [`char`]s of a string slice.
///
/// As a string slice consists of valid UTF-8, we can iterate through a
/// string slice by [`char`]. This method returns such an iterator.
/// Parses this string slice into another type.
///
- /// Because `parse()` is so general, it can cause problems with type
- /// inference. As such, `parse()` is one of the few times you'll see
+ /// Because `parse` is so general, it can cause problems with type
+ /// inference. As such, `parse` is one of the few times you'll see
/// the syntax affectionately known as the 'turbofish': `::<>`. This
/// helps the inference algorithm understand specifically which type
/// you're trying to parse into.
///
- /// `parse()` can parse any type that implements the [`FromStr`] trait.
+ /// `parse` can parse any type that implements the [`FromStr`] trait.
///
/// [`FromStr`]: str/trait.FromStr.html
///
///
/// `replacen` creates a new [`String`], and copies the data from this string slice into it.
/// While doing so, it attempts to find matches of a pattern. If it finds any, it
- /// replaces them with the replacement string slice at most `N` times.
+ /// replaces them with the replacement string slice at most `count` times.
///
/// [`String`]: string/struct.String.html
///
return s;
}
- /// Escapes each char in `s` with `char::escape_debug`.
+ /// Escapes each char in `s` with [`char::escape_debug`].
+ ///
+ /// [`char::escape_debug`]: primitive.char.html#method.escape_debug
#[unstable(feature = "str_escape",
reason = "return type may change to be an iterator",
issue = "27791")]
self.chars().flat_map(|c| c.escape_debug()).collect()
}
- /// Escapes each char in `s` with `char::escape_default`.
+ /// Escapes each char in `s` with [`char::escape_default`].
+ ///
+ /// [`char::escape_default`]: primitive.char.html#method.escape_default
#[unstable(feature = "str_escape",
reason = "return type may change to be an iterator",
issue = "27791")]
self.chars().flat_map(|c| c.escape_default()).collect()
}
- /// Escapes each char in `s` with `char::escape_unicode`.
+ /// Escapes each char in `s` with [`char::escape_unicode`].
+ ///
+ /// [`char::escape_unicode`]: primitive.char.html#method.escape_unicode
#[unstable(feature = "str_escape",
reason = "return type may change to be an iterator",
issue = "27791")]
self.chars().flat_map(|c| c.escape_unicode()).collect()
}
- /// Converts a `Box<str>` into a [`String`] without copying or allocating.
+ /// Converts a [`Box<str>`] into a [`String`] without copying or allocating.
///
/// [`String`]: string/struct.String.html
+ /// [`Box<str>`]: boxed/struct.Box.html
///
/// # Examples
///
assert_eq!("Hi, World!", owned);
assert_eq!("Hello, World!", borrowed);
}
+
+#[test]
+fn check_cow_clone_from() {
+ let mut c1: Cow<str> = Cow::Owned(String::with_capacity(25));
+ let s: String = "hi".to_string();
+ assert!(s.capacity() < 25);
+ let c2: Cow<str> = Cow::Owned(s);
+ c1.clone_from(&c2);
+ assert!(c1.into_owned().capacity() >= 25);
+}
\ No newline at end of file
use core::intrinsics::{arith_offset, assume};
use core::iter::{FromIterator, FusedIterator, TrustedLen};
use core::mem;
+#[cfg(not(test))]
+use core::num::Float;
use core::ops::{InPlace, Index, IndexMut, Place, Placer};
use core::ops;
use core::ptr;
#[doc(hidden)]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_elem<T: Clone>(elem: T, n: usize) -> Vec<T> {
- let mut v = Vec::with_capacity(n);
- v.extend_with_element(n, elem);
- v
+ <T as SpecFromElem>::from_elem(elem, n)
+}
+
+// Specialization trait used for Vec::from_elem
+trait SpecFromElem: Sized {
+ fn from_elem(elem: Self, n: usize) -> Vec<Self>;
+}
+
+impl<T: Clone> SpecFromElem for T {
+ default fn from_elem(elem: Self, n: usize) -> Vec<Self> {
+ let mut v = Vec::with_capacity(n);
+ v.extend_with_element(n, elem);
+ v
+ }
+}
+
+impl SpecFromElem for u8 {
+ #[inline]
+ fn from_elem(elem: u8, n: usize) -> Vec<u8> {
+ if elem == 0 {
+ return Vec {
+ buf: RawVec::with_capacity_zeroed(n),
+ len: n,
+ }
+ }
+ unsafe {
+ let mut v = Vec::with_capacity(n);
+ ptr::write_bytes(v.as_mut_ptr(), elem, n);
+ v.set_len(n);
+ v
+ }
+ }
}
+macro_rules! impl_spec_from_elem {
+ ($t: ty, $is_zero: expr) => {
+ impl SpecFromElem for $t {
+ #[inline]
+ fn from_elem(elem: $t, n: usize) -> Vec<$t> {
+ if $is_zero(elem) {
+ return Vec {
+ buf: RawVec::with_capacity_zeroed(n),
+ len: n,
+ }
+ }
+ let mut v = Vec::with_capacity(n);
+ v.extend_with_element(n, elem);
+ v
+ }
+ }
+ };
+}
+
+impl_spec_from_elem!(i8, |x| x == 0);
+impl_spec_from_elem!(i16, |x| x == 0);
+impl_spec_from_elem!(i32, |x| x == 0);
+impl_spec_from_elem!(i64, |x| x == 0);
+impl_spec_from_elem!(i128, |x| x == 0);
+impl_spec_from_elem!(isize, |x| x == 0);
+
+impl_spec_from_elem!(u16, |x| x == 0);
+impl_spec_from_elem!(u32, |x| x == 0);
+impl_spec_from_elem!(u64, |x| x == 0);
+impl_spec_from_elem!(u128, |x| x == 0);
+impl_spec_from_elem!(usize, |x| x == 0);
+
+impl_spec_from_elem!(f32, |x: f32| x == 0. && x.is_sign_positive());
+impl_spec_from_elem!(f64, |x: f64| x == 0. && x.is_sign_positive());
+
////////////////////////////////////////////////////////////////////////////////
// Common trait implementations for Vec
////////////////////////////////////////////////////////////////////////////////
}
fn clone_from(&mut self, other: &Vec<T>) {
- // drop anything in self that will not be overwritten
- self.truncate(other.len());
- let len = self.len();
-
- // reuse the contained values' allocations/resources.
- self.clone_from_slice(&other[..len]);
-
- // self.len <= other.len due to the truncate above, so the
- // slice here is always in-bounds.
- self.extend_from_slice(&other[len..]);
+ other.as_slice().clone_into(self);
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! `VecDeque` is a double-ended queue, which is implemented with the help of a
-//! growing ring buffer.
+//! A double-ended queue implemented with a growable ring buffer.
//!
//! This queue has `O(1)` amortized inserts and removals from both ends of the
//! container. It also has `O(1)` indexing like a vector. The contained elements
#[cfg(target_pointer_width = "64")]
const MAXIMUM_ZST_CAPACITY: usize = 1 << (64 - 1); // Largest possible power of two
-/// `VecDeque` is a growable ring buffer, which can be used as a double-ended
-/// queue efficiently.
+/// A double-ended queue implemented with a growable ring buffer.
///
-/// The "default" usage of this type as a queue is to use `push_back` to add to
-/// the queue, and `pop_front` to remove from the queue. `extend` and `append`
+/// The "default" usage of this type as a queue is to use [`push_back`] to add to
+/// the queue, and [`pop_front`] to remove from the queue. [`extend`] and [`append`]
/// push onto the back in this manner, and iterating over `VecDeque` goes front
/// to back.
+///
+/// [`push_back`]: #method.push_back
+/// [`pop_front`]: #method.pop_front
+/// [`extend`]: #method.extend
+/// [`append`]: #method.append
#[stable(feature = "rust1", since = "1.0.0")]
pub struct VecDeque<T> {
// tail and head are pointers into the buffer. Tail always points
/// given `VecDeque`. Does nothing if the capacity is already sufficient.
///
/// Note that the allocator may give the collection more space than it requests. Therefore
- /// capacity can not be relied upon to be precisely minimal. Prefer `reserve` if future
+    /// capacity cannot be relied upon to be precisely minimal. Prefer [`reserve`] if future
/// insertions are expected.
///
/// # Panics
/// buf.reserve_exact(10);
/// assert!(buf.capacity() >= 11);
/// ```
+ ///
+ /// [`reserve`]: #method.reserve
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve_exact(&mut self, additional: usize) {
self.reserve(additional);
(head.wrapping_sub(tail)) & (size - 1)
}
-/// `VecDeque` iterator.
+/// An iterator over the elements of a `VecDeque`.
+///
+/// This `struct` is created by the [`iter`] method on [`VecDeque`]. See its
+/// documentation for more.
+///
+/// [`iter`]: struct.VecDeque.html#method.iter
+/// [`VecDeque`]: struct.VecDeque.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
ring: &'a [T],
impl<'a, T> FusedIterator for Iter<'a, T> {}
-/// `VecDeque` mutable iterator.
+/// A mutable iterator over the elements of a `VecDeque`.
+///
+/// This `struct` is created by the [`iter_mut`] method on [`VecDeque`]. See its
+/// documentation for more.
+///
+/// [`iter_mut`]: struct.VecDeque.html#method.iter_mut
+/// [`VecDeque`]: struct.VecDeque.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
ring: &'a mut [T],
#[unstable(feature = "fused", issue = "35602")]
impl<'a, T> FusedIterator for IterMut<'a, T> {}
-/// A by-value `VecDeque` iterator
+/// An owning iterator over the elements of a `VecDeque`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`VecDeque`]
+/// (provided by the `IntoIterator` trait). See its documentation for more.
+///
+/// [`into_iter`]: struct.VecDeque.html#method.into_iter
+/// [`VecDeque`]: struct.VecDeque.html
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<T> {
#[unstable(feature = "fused", issue = "35602")]
impl<T> FusedIterator for IntoIter<T> {}
-/// A draining `VecDeque` iterator
+/// A draining iterator over the elements of a `VecDeque`.
+///
+/// This `struct` is created by the [`drain`] method on [`VecDeque`]. See its
+/// documentation for more.
+///
+/// [`drain`]: struct.VecDeque.html#method.drain
+/// [`VecDeque`]: struct.VecDeque.html
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<'a, T: 'a> {
after_tail: usize,
sr = sr.wrapping_add(1);
// 1 <= sr <= u64::bits() - 1
- q = n.wrapping_shl(64u32.wrapping_sub(sr));
+ q = n.wrapping_shl(128u32.wrapping_sub(sr));
r = n.wrapping_shr(sr);
} else {
if d.high() == 0 {
/// A hashable type.
///
-/// The `H` type parameter is an abstract hash state that is used by the `Hash`
-/// to compute the hash.
+/// Types implementing `Hash` are able to be [`hash`]ed with an instance of
+/// [`Hasher`].
///
-/// If you are also implementing [`Eq`], there is an additional property that
-/// is important:
+/// ## Implementing `Hash`
///
-/// ```text
-/// k1 == k2 -> hash(k1) == hash(k2)
-/// ```
-///
-/// In other words, if two keys are equal, their hashes should also be equal.
-/// [`HashMap`] and [`HashSet`] both rely on this behavior.
+/// You can derive `Hash` with `#[derive(Hash)]` if all fields implement `Hash`.
+/// The resulting hash will be the combination of the values from calling
+/// [`hash`] on each field.
///
-/// ## Derivable
-///
-/// This trait can be used with `#[derive]` if all fields implement `Hash`.
-/// When `derive`d, the resulting hash will be the combination of the values
-/// from calling [`.hash`] on each field.
-///
-/// ## How can I implement `Hash`?
+/// ```
+/// #[derive(Hash)]
+/// struct Rustacean {
+/// name: String,
+/// country: String,
+/// }
+/// ```
///
-/// If you need more control over how a value is hashed, you need to implement
-/// the `Hash` trait:
+/// If you need more control over how a value is hashed, you can of course
+/// implement the `Hash` trait yourself:
///
/// ```
/// use std::hash::{Hash, Hasher};
/// }
/// ```
///
+/// ## `Hash` and `Eq`
+///
+/// When implementing both `Hash` and [`Eq`], it is important that the following
+/// property holds:
+///
+/// ```text
+/// k1 == k2 -> hash(k1) == hash(k2)
+/// ```
+///
+/// In other words, if two keys are equal, their hashes must also be equal.
+/// [`HashMap`] and [`HashSet`] both rely on this behavior.
+///
+/// Thankfully, you won't need to worry about upholding this property when
+/// deriving both [`Eq`] and `Hash` with `#[derive(PartialEq, Eq, Hash)]`.
+///
/// [`Eq`]: ../../std/cmp/trait.Eq.html
+/// [`Hasher`]: trait.Hasher.html
/// [`HashMap`]: ../../std/collections/struct.HashMap.html
/// [`HashSet`]: ../../std/collections/struct.HashSet.html
-/// [`.hash`]: #tymethod.hash
+/// [`hash`]: #tymethod.hash
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Hash {
- /// Feeds this value into the state given, updating the hasher as necessary.
+ /// Feeds this value into the given [`Hasher`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::hash_map::DefaultHasher;
+ /// use std::hash::{Hash, Hasher};
+ ///
+ /// let mut hasher = DefaultHasher::new();
+ /// 7920.hash(&mut hasher);
+ /// println!("Hash is {:x}!", hasher.finish());
+ /// ```
+ ///
+ /// [`Hasher`]: trait.Hasher.html
#[stable(feature = "rust1", since = "1.0.0")]
fn hash<H: Hasher>(&self, state: &mut H);
- /// Feeds a slice of this type into the state provided.
+ /// Feeds a slice of this type into the given [`Hasher`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::hash_map::DefaultHasher;
+ /// use std::hash::{Hash, Hasher};
+ ///
+ /// let mut hasher = DefaultHasher::new();
+ /// let numbers = [6, 28, 496, 8128];
+ /// Hash::hash_slice(&numbers, &mut hasher);
+ /// println!("Hash is {:x}!", hasher.finish());
+ /// ```
+ ///
+ /// [`Hasher`]: trait.Hasher.html
#[stable(feature = "hash_slice", since = "1.3.0")]
fn hash_slice<H: Hasher>(data: &[Self], state: &mut H)
where Self: Sized
}
}
-/// A trait which represents the ability to hash an arbitrary stream of bytes.
+/// A trait for hashing an arbitrary stream of bytes.
+///
+/// Instances of `Hasher` usually represent state that is changed while hashing
+/// data.
+///
+/// `Hasher` provides a fairly basic interface for retrieving the generated hash
+/// (with [`finish`]), and writing integers as well as slices of bytes into an
+/// instance (with [`write`] and [`write_u8`] etc.). Most of the time, `Hasher`
+/// instances are used in conjunction with the [`Hash`] trait.
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::hash_map::DefaultHasher;
+/// use std::hash::Hasher;
+///
+/// let mut hasher = DefaultHasher::new();
+///
+/// hasher.write_u32(1989);
+/// hasher.write_u8(11);
+/// hasher.write_u8(9);
+/// hasher.write(b"Huh?");
+///
+/// println!("Hash is {:x}!", hasher.finish());
+/// ```
+///
+/// [`Hash`]: trait.Hash.html
+/// [`finish`]: #tymethod.finish
+/// [`write`]: #tymethod.write
+/// [`write_u8`]: #method.write_u8
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Hasher {
/// Completes a round of hashing, producing the output hash generated.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::hash_map::DefaultHasher;
+ /// use std::hash::Hasher;
+ ///
+ /// let mut hasher = DefaultHasher::new();
+ /// hasher.write(b"Cool!");
+ ///
+ /// println!("Hash is {:x}!", hasher.finish());
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn finish(&self) -> u64;
/// Writes some data into this `Hasher`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::hash_map::DefaultHasher;
+ /// use std::hash::Hasher;
+ ///
+ /// let mut hasher = DefaultHasher::new();
+ /// let data = [0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef];
+ ///
+ /// hasher.write(&data);
+ ///
+ /// println!("Hash is {:x}!", hasher.finish());
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn write(&mut self, bytes: &[u8]);
- /// Write a single `u8` into this hasher.
+ /// Writes a single `u8` into this hasher.
#[inline]
#[stable(feature = "hasher_write", since = "1.3.0")]
fn write_u8(&mut self, i: u8) {
}
}
-/// A `BuildHasher` is typically used as a factory for instances of `Hasher`
-/// which a `HashMap` can then use to hash keys independently.
+/// A trait for creating instances of [`Hasher`].
+///
+/// A `BuildHasher` is typically used (e.g. by [`HashMap`]) to create
+/// [`Hasher`]s for each key such that they are hashed independently of one
+/// another, since [`Hasher`]s contain state.
+///
+/// For each instance of `BuildHasher`, the [`Hasher`]s created by
+/// [`build_hasher`] should be identical. That is, if the same stream of bytes
+/// is fed into each hasher, the same output will also be generated.
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::hash_map::RandomState;
+/// use std::hash::{BuildHasher, Hasher};
+///
+/// let s = RandomState::new();
+/// let mut hasher_1 = s.build_hasher();
+/// let mut hasher_2 = s.build_hasher();
///
-/// Note that for each instance of `BuildHasher`, the created hashers should be
-/// identical. That is, if the same stream of bytes is fed into each hasher, the
-/// same output will also be generated.
+/// hasher_1.write_u32(8128);
+/// hasher_2.write_u32(8128);
+///
+/// assert_eq!(hasher_1.finish(), hasher_2.finish());
+/// ```
+///
+/// [`build_hasher`]: #tymethod.build_hasher
+/// [`Hasher`]: trait.Hasher.html
+/// [`HashMap`]: ../../std/collections/struct.HashMap.html
#[stable(since = "1.7.0", feature = "build_hasher")]
pub trait BuildHasher {
/// Type of the hasher that will be created.
/// Creates a new hasher.
///
+ /// Each call to `build_hasher` on the same instance should produce identical
+ /// [`Hasher`]s.
+ ///
/// # Examples
///
/// ```
/// let s = RandomState::new();
/// let new_s = s.build_hasher();
/// ```
+ ///
+ /// [`Hasher`]: trait.Hasher.html
#[stable(since = "1.7.0", feature = "build_hasher")]
fn build_hasher(&self) -> Self::Hasher;
}
-/// The `BuildHasherDefault` structure is used in scenarios where one has a
-/// type that implements [`Hasher`] and [`Default`], but needs that type to
-/// implement [`BuildHasher`].
+/// Used to create a default [`BuildHasher`] instance for types that implement
+/// [`Hasher`] and [`Default`].
///
-/// This structure is zero-sized and does not need construction.
+/// `BuildHasherDefault<H>` can be used when a type `H` implements [`Hasher`] and
+/// [`Default`], and you need a corresponding [`BuildHasher`] instance, but none is
+/// defined.
+///
+/// Any `BuildHasherDefault` is [zero-sized]. It can be created with
+/// [`default`][method.Default]. When using `BuildHasherDefault` with [`HashMap`] or
+/// [`HashSet`], this doesn't need to be done, since they implement appropriate
+/// [`Default`] instances themselves.
///
/// # Examples
///
///
/// [`BuildHasher`]: trait.BuildHasher.html
/// [`Default`]: ../default/trait.Default.html
+/// [method.default]: #method.default
/// [`Hasher`]: trait.Hasher.html
/// [`HashMap`]: ../../std/collections/struct.HashMap.html
+/// [`HashSet`]: ../../std/collections/struct.HashSet.html
+/// [zero-sized]: https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts
#[stable(since = "1.7.0", feature = "build_hasher")]
pub struct BuildHasherDefault<H>(marker::PhantomData<H>);
/// initialize memory previous set to the result of `uninit`.
pub fn uninit<T>() -> T;
- /// Moves a value out of scope without running drop glue.
- pub fn forget<T>(_: T) -> ();
-
/// Reinterprets the bits of a value of one type as another type.
///
/// Both types must have the same size. Neither the original, nor the result,
/// Stopping at the first `true`:
///
/// ```
- /// let a = [1, 2, 3];
+ /// let a = [1, 2, 3, 4];
///
/// let mut iter = a.iter();
///
- /// assert_eq!(iter.position(|&x| x == 2), Some(1));
+ /// assert_eq!(iter.position(|&x| x >= 2), Some(1));
///
/// // we can still use `iter`, as there are more elements.
/// assert_eq!(iter.next(), Some(&3));
+ ///
+ /// // The returned index depends on iterator state
+ /// assert_eq!(iter.position(|&x| x == 4), Some(0));
+ ///
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
}
}
-/// Select an element from an iterator based on the given projection
+/// Select an element from an iterator based on the given "projection"
/// and "comparison" function.
///
/// This is an idiosyncratic helper to try to factor out the
/// commonalities of {max,min}{,_by}. In particular, this avoids
/// having to implement optimizations several times.
#[inline]
-fn select_fold1<I,B, FProj, FCmp>(mut it: I,
- mut f_proj: FProj,
- mut f_cmp: FCmp) -> Option<(B, I::Item)>
+fn select_fold1<I, B, FProj, FCmp>(mut it: I,
+ mut f_proj: FProj,
+ mut f_cmp: FCmp) -> Option<(B, I::Item)>
where I: Iterator,
FProj: FnMut(&I::Item) -> B,
FCmp: FnMut(&B, &I::Item, &B, &I::Item) -> bool
for x in it {
let x_p = f_proj(&x);
- if f_cmp(&sel_p, &sel, &x_p, &x) {
+ if f_cmp(&sel_p, &sel, &x_p, &x) {
sel = x;
sel_p = x_p;
}
#![feature(allow_internal_unstable)]
#![feature(asm)]
#![feature(associated_type_defaults)]
+#![feature(associated_consts)]
#![feature(cfg_target_feature)]
#![feature(cfg_target_has_atomic)]
#![feature(concat_idents)]
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn forget<T>(t: T) {
- unsafe { intrinsics::forget(t) }
+ ManuallyDrop::new(t);
}
/// Returns the size of a type in bytes.
}
}
+
+/// A wrapper to inhibit the compiler from automatically calling `T`’s destructor.
+///
+/// This wrapper is 0-cost.
+///
+/// # Examples
+///
+/// This wrapper helps with explicitly documenting the drop order dependencies between fields of
+/// the type:
+///
+/// ```rust
+/// # #![feature(manually_drop)]
+/// use std::mem::ManuallyDrop;
+/// struct Peach;
+/// struct Banana;
+/// struct Melon;
+/// struct FruitBox {
+/// // Immediately clear there’s something non-trivial going on with these fields.
+/// peach: ManuallyDrop<Peach>,
+/// melon: Melon, // Field that’s independent of the other two.
+/// banana: ManuallyDrop<Banana>,
+/// }
+///
+/// impl Drop for FruitBox {
+/// fn drop(&mut self) {
+/// unsafe {
+/// // Explicit ordering in which field destructors are run specified in the intuitive
+/// // location – the destructor of the structure containing the fields.
+/// // Moreover, one can now reorder fields within the struct however much they want.
+/// ManuallyDrop::drop(&mut self.peach);
+/// ManuallyDrop::drop(&mut self.banana);
+/// }
+/// // After destructor for `FruitBox` runs (this function), the destructor for Melon gets
+/// // invoked in the usual manner, as it is not wrapped in `ManuallyDrop`.
+/// }
+/// }
+/// ```
+#[unstable(feature = "manually_drop", issue = "40673")]
+#[allow(unions_with_drop_fields)]
+pub union ManuallyDrop<T>{ value: T }
+
+impl<T> ManuallyDrop<T> {
+    /// Wraps a value to be manually dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # #![feature(manually_drop)]
+ /// use std::mem::ManuallyDrop;
+ /// ManuallyDrop::new(Box::new(()));
+ /// ```
+ #[unstable(feature = "manually_drop", issue = "40673")]
+ #[inline]
+ pub fn new(value: T) -> ManuallyDrop<T> {
+ ManuallyDrop { value: value }
+ }
+
+    /// Extracts the value from the `ManuallyDrop` container.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # #![feature(manually_drop)]
+ /// use std::mem::ManuallyDrop;
+ /// let x = ManuallyDrop::new(Box::new(()));
+ /// let _: Box<()> = ManuallyDrop::into_inner(x);
+ /// ```
+ #[unstable(feature = "manually_drop", issue = "40673")]
+ #[inline]
+ pub fn into_inner(slot: ManuallyDrop<T>) -> T {
+ unsafe {
+ slot.value
+ }
+ }
+
+ /// Manually drops the contained value.
+ ///
+    /// # Safety
+ ///
+ /// This function runs the destructor of the contained value and thus the wrapped value
+ /// now represents uninitialized data. It is up to the user of this method to ensure the
+ /// uninitialized data is not actually used.
+ #[unstable(feature = "manually_drop", issue = "40673")]
+ #[inline]
+ pub unsafe fn drop(slot: &mut ManuallyDrop<T>) {
+ ptr::drop_in_place(&mut slot.value)
+ }
+}
+
+#[unstable(feature = "manually_drop", issue = "40673")]
+impl<T> ::ops::Deref for ManuallyDrop<T> {
+ type Target = T;
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ unsafe {
+ &self.value
+ }
+ }
+}
+
+#[unstable(feature = "manually_drop", issue = "40673")]
+impl<T> ::ops::DerefMut for ManuallyDrop<T> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ unsafe {
+ &mut self.value
+ }
+ }
+}
+
+#[unstable(feature = "manually_drop", issue = "40673")]
+impl<T: ::fmt::Debug> ::fmt::Debug for ManuallyDrop<T> {
+ fn fmt(&self, fmt: &mut ::fmt::Formatter) -> ::fmt::Result {
+ unsafe {
+ fmt.debug_tuple("ManuallyDrop").field(&self.value).finish()
+ }
+ }
+}
/// a bignum.
pub fn fast_path<T: RawFloat>(integral: &[u8], fractional: &[u8], e: i64) -> Option<T> {
let num_digits = integral.len() + fractional.len();
- // log_10(f64::max_sig) ~ 15.95. We compare the exact value to max_sig near the end,
+ // log_10(f64::MAX_SIG) ~ 15.95. We compare the exact value to MAX_SIG near the end,
// this is just a quick, cheap rejection (and also frees the rest of the code from
// worrying about underflow).
if num_digits > 16 {
return None;
}
- if e.abs() >= T::ceil_log5_of_max_sig() as i64 {
+ if e.abs() >= T::CEIL_LOG5_OF_MAX_SIG as i64 {
return None;
}
let f = num::from_str_unchecked(integral.iter().chain(fractional.iter()));
- if f > T::max_sig() {
+ if f > T::MAX_SIG {
return None;
}
/// > the best possible approximation that uses p bits of significand.)
pub fn bellerophon<T: RawFloat>(f: &Big, e: i16) -> T {
let slop;
- if f <= &Big::from_u64(T::max_sig()) {
+ if f <= &Big::from_u64(T::MAX_SIG) {
// The cases abs(e) < log5(2^N) are in fast_path()
slop = if e >= 0 { 0 } else { 3 };
} else {
slop = if e >= 0 { 1 } else { 4 };
}
let z = rawfp::big_to_fp(f).mul(&power_of_ten(e)).normalize();
- let exp_p_n = 1 << (P - T::sig_bits() as u32);
+ let exp_p_n = 1 << (P - T::SIG_BITS as u32);
let lowbits: i64 = (z.f % exp_p_n) as i64;
// Is the slop large enough to make a difference when
// rounding to n bits?
if d2 < y {
let mut d2_double = d2;
d2_double.mul_pow2(1);
- if m == T::min_sig() && d_negative && d2_double > y {
+ if m == T::MIN_SIG && d_negative && d2_double > y {
z = prev_float(z);
} else {
return z;
}
} else if d2 == y {
if m % 2 == 0 {
- if m == T::min_sig() && d_negative {
+ if m == T::MIN_SIG && d_negative {
z = prev_float(z);
} else {
return z;
quick_start::<T>(&mut u, &mut v, &mut k);
let mut rem = Big::from_small(0);
let mut x = Big::from_small(0);
- let min_sig = Big::from_u64(T::min_sig());
- let max_sig = Big::from_u64(T::max_sig());
+ let min_sig = Big::from_u64(T::MIN_SIG);
+ let max_sig = Big::from_u64(T::MAX_SIG);
loop {
u.div_rem(&v, &mut x, &mut rem);
- if k == T::min_exp_int() {
- // We have to stop at the minimum exponent, if we wait until `k < T::min_exp_int()`,
+ if k == T::MIN_EXP_INT {
+ // We have to stop at the minimum exponent, if we wait until `k < T::MIN_EXP_INT`,
// then we'd be off by a factor of two. Unfortunately this means we have to special-
// case normal numbers with the minimum exponent.
// FIXME find a more elegant formulation, but run the `tiny-pow10` test to make sure
}
return underflow(x, v, rem);
}
- if k > T::max_exp_int() {
- return T::infinity2();
+ if k > T::MAX_EXP_INT {
+ return T::INFINITY;
}
if x < min_sig {
u.mul_pow2(1);
// The target ratio is one where u/v is in an in-range significand. Thus our termination
// condition is log2(u / v) being the significand bits, plus/minus one.
// FIXME Looking at the second bit could improve the estimate and avoid some more divisions.
- let target_ratio = T::sig_bits() as i16;
+ let target_ratio = T::SIG_BITS as i16;
let log2_u = u.bit_length() as i16;
let log2_v = v.bit_length() as i16;
let mut u_shift: i16 = 0;
let mut v_shift: i16 = 0;
assert!(*k == 0);
loop {
- if *k == T::min_exp_int() {
+ if *k == T::MIN_EXP_INT {
// Underflow or subnormal. Leave it to the main function.
break;
}
- if *k == T::max_exp_int() {
+ if *k == T::MAX_EXP_INT {
// Overflow. Leave it to the main function.
break;
}
}
fn underflow<T: RawFloat>(x: Big, v: Big, rem: Big) -> T {
- if x < Big::from_u64(T::min_sig()) {
+ if x < Big::from_u64(T::MIN_SIG) {
let q = num::to_u64(&x);
let z = rawfp::encode_subnormal(q);
return round_by_remainder(v, rem, q, z);
// needs to be rounded up. Only when the rounded off bits are 1/2 and the remainder
// is zero, we have a half-to-even situation.
let bits = x.bit_length();
- let lsb = bits - T::sig_bits() as usize;
+ let lsb = bits - T::SIG_BITS as usize;
let q = num::get_bits(&x, lsb, bits);
- let k = T::min_exp_int() + lsb as i16;
+ let k = T::MIN_EXP_INT + lsb as i16;
let z = rawfp::encode_normal(Unpacked::new(q, k));
let q_even = q % 2 == 0;
match num::compare_with_half_ulp(&x, lsb) {
let (sign, s) = extract_sign(s);
let flt = match parse_decimal(s) {
ParseResult::Valid(decimal) => convert(decimal)?,
- ParseResult::ShortcutToInf => T::infinity2(),
- ParseResult::ShortcutToZero => T::zero2(),
+ ParseResult::ShortcutToInf => T::INFINITY,
+ ParseResult::ShortcutToZero => T::ZERO,
ParseResult::Invalid => match s {
- "inf" => T::infinity2(),
- "NaN" => T::nan2(),
+ "inf" => T::INFINITY,
+ "NaN" => T::NAN,
_ => { return Err(pfe_invalid()); }
}
};
// FIXME These bounds are rather conservative. A more careful analysis of the failure modes
// of Bellerophon could allow using it in more cases for a massive speed up.
let exponent_in_range = table::MIN_E <= e && e <= table::MAX_E;
- let value_in_range = upper_bound <= T::max_normal_digits() as u64;
+ let value_in_range = upper_bound <= T::MAX_NORMAL_DIGITS as u64;
if exponent_in_range && value_in_range {
Ok(algorithm::bellerophon(&f, e))
} else {
fn trivial_cases<T: RawFloat>(decimal: &Decimal) -> Option<T> {
// There were zeros but they were stripped by simplify()
if decimal.integral.is_empty() && decimal.fractional.is_empty() {
- return Some(T::zero2());
+ return Some(T::ZERO);
}
// This is a crude approximation of ceil(log10(the real value)). We don't need to worry too
// much about overflow here because the input length is tiny (at least compared to 2^64) and
// the parser already handles exponents whose absolute value is greater than 10^18
// (which is still 10^19 short of 2^64).
let max_place = decimal.exp + decimal.integral.len() as i64;
- if max_place > T::inf_cutoff() {
- return Some(T::infinity2());
- } else if max_place < T::zero_cutoff() {
- return Some(T::zero2());
+ if max_place > T::INF_CUTOFF {
+ return Some(T::INFINITY);
+ } else if max_place < T::ZERO_CUTOFF {
+ return Some(T::ZERO);
}
None
}
///
/// Should **never ever** be implemented for other types or be used outside the dec2flt module.
/// Inherits from `Float` because there is some overlap, but all the reused methods are trivial.
-/// The "methods" (pseudo-constants) with default implementation should not be overriden.
pub trait RawFloat : Float + Copy + Debug + LowerExp
+ Mul<Output=Self> + Div<Output=Self> + Neg<Output=Self>
{
- // suffix of "2" because Float::infinity is deprecated
- #[allow(deprecated)]
- fn infinity2() -> Self {
- Float::infinity()
- }
-
- // suffix of "2" because Float::nan is deprecated
- #[allow(deprecated)]
- fn nan2() -> Self {
- Float::nan()
- }
-
- // suffix of "2" because Float::zero is deprecated
- fn zero2() -> Self;
+ const INFINITY: Self;
+ const NAN: Self;
+ const ZERO: Self;
// suffix of "2" because Float::integer_decode is deprecated
#[allow(deprecated)]
/// represented, the other code in this module makes sure to never let that happen.
fn from_int(x: u64) -> Self;
- /// Get the value 10<sup>e</sup> from a pre-computed table. Panics for e >=
- /// ceil_log5_of_max_sig().
+ /// Get the value 10<sup>e</sup> from a pre-computed table.
+ /// Panics for `e >= CEIL_LOG5_OF_MAX_SIG`.
fn short_fast_pow10(e: usize) -> Self;
- // FIXME Everything that follows should be associated constants, but taking the value of an
- // associated constant from a type parameter does not work (yet?)
- // A possible workaround is having a `FloatInfo` struct for all the constants, but so far
- // the methods aren't painful enough to rewrite.
-
/// What the name says. It's easier to hard code than juggling intrinsics and
/// hoping LLVM constant folds it.
- fn ceil_log5_of_max_sig() -> i16;
+ const CEIL_LOG5_OF_MAX_SIG: i16;
    /// A conservative bound on the decimal digits of inputs that can't produce overflow or zero or
/// subnormals. Probably the decimal exponent of the maximum normal value, hence the name.
- fn max_normal_digits() -> usize;
+ const MAX_NORMAL_DIGITS: usize;
/// When the most significant decimal digit has a place value greater than this, the number
/// is certainly rounded to infinity.
- fn inf_cutoff() -> i64;
+ const INF_CUTOFF: i64;
/// When the most significant decimal digit has a place value less than this, the number
/// is certainly rounded to zero.
- fn zero_cutoff() -> i64;
+ const ZERO_CUTOFF: i64;
/// The number of bits in the exponent.
- fn exp_bits() -> u8;
+ const EXP_BITS: u8;
    /// The number of bits in the significand, *including* the hidden bit.
- fn sig_bits() -> u8;
+ const SIG_BITS: u8;
/// The number of bits in the singificand, *excluding* the hidden bit.
- fn explicit_sig_bits() -> u8 {
- Self::sig_bits() - 1
- }
+ const EXPLICIT_SIG_BITS: u8;
/// The maximum legal exponent in fractional representation.
- fn max_exp() -> i16 {
- (1 << (Self::exp_bits() - 1)) - 1
- }
+ const MAX_EXP: i16;
/// The minimum legal exponent in fractional representation, excluding subnormals.
- fn min_exp() -> i16 {
- -Self::max_exp() + 1
- }
+ const MIN_EXP: i16;
/// `MAX_EXP` for integral representation, i.e., with the shift applied.
- fn max_exp_int() -> i16 {
- Self::max_exp() - (Self::sig_bits() as i16 - 1)
- }
+ const MAX_EXP_INT: i16;
/// `MAX_EXP` encoded (i.e., with offset bias)
- fn max_encoded_exp() -> i16 {
- (1 << Self::exp_bits()) - 1
- }
+ const MAX_ENCODED_EXP: i16;
/// `MIN_EXP` for integral representation, i.e., with the shift applied.
- fn min_exp_int() -> i16 {
- Self::min_exp() - (Self::sig_bits() as i16 - 1)
- }
+ const MIN_EXP_INT: i16;
/// The maximum normalized singificand in integral representation.
- fn max_sig() -> u64 {
- (1 << Self::sig_bits()) - 1
- }
+ const MAX_SIG: u64;
/// The minimal normalized significand in integral representation.
- fn min_sig() -> u64 {
- 1 << (Self::sig_bits() - 1)
- }
+ const MIN_SIG: u64;
}
-impl RawFloat for f32 {
- fn zero2() -> Self {
- 0.0
- }
-
- fn sig_bits() -> u8 {
- 24
- }
-
- fn exp_bits() -> u8 {
- 8
+// Mostly a workaround for #34344.
+macro_rules! other_constants {
+ ($type: ident) => {
+ const EXPLICIT_SIG_BITS: u8 = Self::SIG_BITS - 1;
+ const MAX_EXP: i16 = (1 << (Self::EXP_BITS - 1)) - 1;
+ const MIN_EXP: i16 = -Self::MAX_EXP + 1;
+ const MAX_EXP_INT: i16 = Self::MAX_EXP - (Self::SIG_BITS as i16 - 1);
+ const MAX_ENCODED_EXP: i16 = (1 << Self::EXP_BITS) - 1;
+ const MIN_EXP_INT: i16 = Self::MIN_EXP - (Self::SIG_BITS as i16 - 1);
+ const MAX_SIG: u64 = (1 << Self::SIG_BITS) - 1;
+ const MIN_SIG: u64 = 1 << (Self::SIG_BITS - 1);
+
+ const INFINITY: Self = $crate::$type::INFINITY;
+ const NAN: Self = $crate::$type::NAN;
+ const ZERO: Self = 0.0;
}
+}
- fn ceil_log5_of_max_sig() -> i16 {
- 11
- }
+impl RawFloat for f32 {
+ const SIG_BITS: u8 = 24;
+ const EXP_BITS: u8 = 8;
+ const CEIL_LOG5_OF_MAX_SIG: i16 = 11;
+ const MAX_NORMAL_DIGITS: usize = 35;
+ const INF_CUTOFF: i64 = 40;
+ const ZERO_CUTOFF: i64 = -48;
+ other_constants!(f32);
fn transmute(self) -> u64 {
let bits: u32 = unsafe { transmute(self) };
fn short_fast_pow10(e: usize) -> Self {
table::F32_SHORT_POWERS[e]
}
-
- fn max_normal_digits() -> usize {
- 35
- }
-
- fn inf_cutoff() -> i64 {
- 40
- }
-
- fn zero_cutoff() -> i64 {
- -48
- }
}
impl RawFloat for f64 {
- fn zero2() -> Self {
- 0.0
- }
-
- fn sig_bits() -> u8 {
- 53
- }
-
- fn exp_bits() -> u8 {
- 11
- }
-
- fn ceil_log5_of_max_sig() -> i16 {
- 23
- }
+ const SIG_BITS: u8 = 53;
+ const EXP_BITS: u8 = 11;
+ const CEIL_LOG5_OF_MAX_SIG: i16 = 23;
+ const MAX_NORMAL_DIGITS: usize = 305;
+ const INF_CUTOFF: i64 = 310;
+ const ZERO_CUTOFF: i64 = -326;
+ other_constants!(f64);
fn transmute(self) -> u64 {
let bits: u64 = unsafe { transmute(self) };
fn short_fast_pow10(e: usize) -> Self {
table::F64_SHORT_POWERS[e]
}
-
- fn max_normal_digits() -> usize {
- 305
- }
-
- fn inf_cutoff() -> i64 {
- 310
- }
-
- fn zero_cutoff() -> i64 {
- -326
- }
-
}
-/// Convert an Fp to the closest f64. Only handles number that fit into a normalized f64.
+/// Convert an Fp to the closest machine float type.
+/// Does not handle subnormal results.
pub fn fp_to_float<T: RawFloat>(x: Fp) -> T {
let x = x.normalize();
// x.f is 64 bit, so x.e has a mantissa shift of 63
let e = x.e + 63;
- if e > T::max_exp() {
+ if e > T::MAX_EXP {
panic!("fp_to_float: exponent {} too large", e)
- } else if e > T::min_exp() {
+ } else if e > T::MIN_EXP {
encode_normal(round_normal::<T>(x))
} else {
panic!("fp_to_float: exponent {} too small", e)
}
}
-/// Round the 64-bit significand to 53 bit with half-to-even. Does not handle exponent overflow.
+/// Round the 64-bit significand to T::SIG_BITS bits with half-to-even.
+/// Does not handle exponent overflow.
pub fn round_normal<T: RawFloat>(x: Fp) -> Unpacked {
- let excess = 64 - T::sig_bits() as i16;
+ let excess = 64 - T::SIG_BITS as i16;
let half: u64 = 1 << (excess - 1);
let (q, rem) = (x.f >> excess, x.f & ((1 << excess) - 1));
assert_eq!(q << excess | rem, x.f);
Unpacked::new(q, k)
} else if rem == half && (q % 2) == 0 {
Unpacked::new(q, k)
- } else if q == T::max_sig() {
- Unpacked::new(T::min_sig(), k + 1)
+ } else if q == T::MAX_SIG {
+ Unpacked::new(T::MIN_SIG, k + 1)
} else {
Unpacked::new(q + 1, k)
}
/// Inverse of `RawFloat::unpack()` for normalized numbers.
/// Panics if the significand or exponent are not valid for normalized numbers.
pub fn encode_normal<T: RawFloat>(x: Unpacked) -> T {
- debug_assert!(T::min_sig() <= x.sig && x.sig <= T::max_sig(),
+ debug_assert!(T::MIN_SIG <= x.sig && x.sig <= T::MAX_SIG,
"encode_normal: significand not normalized");
// Remove the hidden bit
- let sig_enc = x.sig & !(1 << T::explicit_sig_bits());
+ let sig_enc = x.sig & !(1 << T::EXPLICIT_SIG_BITS);
// Adjust the exponent for exponent bias and mantissa shift
- let k_enc = x.k + T::max_exp() + T::explicit_sig_bits() as i16;
- debug_assert!(k_enc != 0 && k_enc < T::max_encoded_exp(),
+ let k_enc = x.k + T::MAX_EXP + T::EXPLICIT_SIG_BITS as i16;
+ debug_assert!(k_enc != 0 && k_enc < T::MAX_ENCODED_EXP,
"encode_normal: exponent out of range");
// Leave sign bit at 0 ("+"), our numbers are all positive
- let bits = (k_enc as u64) << T::explicit_sig_bits() | sig_enc;
+ let bits = (k_enc as u64) << T::EXPLICIT_SIG_BITS | sig_enc;
T::from_bits(bits)
}
-/// Construct the subnormal. A mantissa of 0 is allowed and constructs zero.
+/// Construct a subnormal. A mantissa of 0 is allowed and constructs zero.
pub fn encode_subnormal<T: RawFloat>(significand: u64) -> T {
- assert!(significand < T::min_sig(), "encode_subnormal: not actually subnormal");
+ assert!(significand < T::MIN_SIG, "encode_subnormal: not actually subnormal");
// Encoded exponent is 0, the sign bit is 0, so we just have to reinterpret the bits.
T::from_bits(significand)
}
Zero => panic!("prev_float: argument is zero"),
Normal => {
let Unpacked { sig, k } = x.unpack();
- if sig == T::min_sig() {
- encode_normal(Unpacked::new(T::max_sig(), k - 1))
+ if sig == T::MIN_SIG {
+ encode_normal(Unpacked::new(T::MAX_SIG, k - 1))
} else {
encode_normal(Unpacked::new(sig - 1, k))
}
pub fn next_float<T: RawFloat>(x: T) -> T {
match x.classify() {
Nan => panic!("next_float: argument is NaN"),
- Infinite => T::infinity2(),
+ Infinite => T::INFINITY,
// This seems too good to be true, but it works.
// 0.0 is encoded as the all-zero word. Subnormals are 0x000m...m where m is the mantissa.
// In particular, the smallest subnormal is 0x0...01 and the largest is 0x000F...F.
use mem;
use ptr;
-/// Holds a value, but never drops it.
-#[allow(unions_with_drop_fields)]
-union NoDrop<T> {
- value: T
-}
-
/// When dropped, copies from `src` into `dest`.
struct CopyOnDrop<T> {
src: *mut T,
// Read the first element into a stack-allocated variable. If a following comparison
// operation panics, `hole` will get dropped and automatically write the element back
// into the slice.
- let mut tmp = NoDrop { value: ptr::read(v.get_unchecked(0)) };
+ let mut tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(0)));
let mut hole = CopyOnDrop {
- src: &mut tmp.value,
+ src: &mut *tmp,
dest: v.get_unchecked_mut(1),
};
ptr::copy_nonoverlapping(v.get_unchecked(1), v.get_unchecked_mut(0), 1);
for i in 2..len {
- if !is_less(v.get_unchecked(i), &tmp.value) {
+ if !is_less(v.get_unchecked(i), &*tmp) {
break;
}
// Read the last element into a stack-allocated variable. If a following comparison
// operation panics, `hole` will get dropped and automatically write the element back
// into the slice.
- let mut tmp = NoDrop { value: ptr::read(v.get_unchecked(len - 1)) };
+ let mut tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(len - 1)));
let mut hole = CopyOnDrop {
- src: &mut tmp.value,
+ src: &mut *tmp,
dest: v.get_unchecked_mut(len - 2),
};
ptr::copy_nonoverlapping(v.get_unchecked(len - 2), v.get_unchecked_mut(len - 1), 1);
for i in (0..len-2).rev() {
- if !is_less(&tmp.value, v.get_unchecked(i)) {
+ if !is_less(&*tmp, v.get_unchecked(i)) {
break;
}
// Read the pivot into a stack-allocated variable for efficiency. If a following comparison
// operation panics, the pivot will be automatically written back into the slice.
- let mut tmp = NoDrop { value: unsafe { ptr::read(pivot) } };
+ let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
let _pivot_guard = CopyOnDrop {
- src: unsafe { &mut tmp.value },
+ src: &mut *tmp,
dest: pivot,
};
- let pivot = unsafe { &tmp.value };
+ let pivot = &*tmp;
// Find the first pair of out-of-order elements.
let mut l = 0;
// Read the pivot into a stack-allocated variable for efficiency. If a following comparison
// operation panics, the pivot will be automatically written back into the slice.
- let mut tmp = NoDrop { value: unsafe { ptr::read(pivot) } };
+ let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
let _pivot_guard = CopyOnDrop {
- src: unsafe { &mut tmp.value },
+ src: &mut *tmp,
dest: pivot,
};
- let pivot = unsafe { &tmp.value };
+ let pivot = &*tmp;
// Now partition the slice.
let mut l = 0;
assert!(shape > 0.0, "Gamma::new called with shape <= 0");
assert!(scale > 0.0, "Gamma::new called with scale <= 0");
- let repr = match shape {
- 1.0 => One(Exp::new(1.0 / scale)),
- 0.0...1.0 => Small(GammaSmallShape::new_raw(shape, scale)),
- _ => Large(GammaLargeShape::new_raw(shape, scale)),
+ let repr = if shape == 1.0 {
+ One(Exp::new(1.0 / scale))
+ } else if 0.0 <= shape && shape < 1.0 {
+ Small(GammaSmallShape::new_raw(shape, scale))
+ } else {
+ Large(GammaLargeShape::new_raw(shape, scale))
};
- Gamma { repr: repr }
+ Gamma { repr }
}
}
// Represents different phases in the compiler.
CollectLanguageItems,
- CheckStaticRecursion,
ResolveLifetimes,
RegionResolveCrate,
- CheckLoops,
PluginRegistrar,
StabilityIndex,
CollectItem(D),
CollectItemSig(D),
Coherence,
- EffectCheck,
- Liveness,
Resolve,
EntryPoint,
CheckEntryFn,
MirKrate => Some(MirKrate),
TypeckBodiesKrate => Some(TypeckBodiesKrate),
CollectLanguageItems => Some(CollectLanguageItems),
- CheckStaticRecursion => Some(CheckStaticRecursion),
ResolveLifetimes => Some(ResolveLifetimes),
RegionResolveCrate => Some(RegionResolveCrate),
- CheckLoops => Some(CheckLoops),
PluginRegistrar => Some(PluginRegistrar),
StabilityIndex => Some(StabilityIndex),
Coherence => Some(Coherence),
- EffectCheck => Some(EffectCheck),
- Liveness => Some(Liveness),
Resolve => Some(Resolve),
EntryPoint => Some(EntryPoint),
CheckEntryFn => Some(CheckEntryFn),
// Macro namespace
Macro(DefId, MacroKind),
+ GlobalAsm(DefId),
+
// Both namespaces
Err,
}
Def::Variant(id) | Def::VariantCtor(id, ..) | Def::Enum(id) | Def::TyAlias(id) |
Def::AssociatedTy(id) | Def::TyParam(id) | Def::Struct(id) | Def::StructCtor(id, ..) |
Def::Union(id) | Def::Trait(id) | Def::Method(id) | Def::Const(id) |
- Def::AssociatedConst(id) | Def::Local(id) | Def::Upvar(id, ..) | Def::Macro(id, ..) => {
+ Def::AssociatedConst(id) | Def::Local(id) | Def::Upvar(id, ..) | Def::Macro(id, ..) |
+ Def::GlobalAsm(id) => {
id
}
Def::Label(..) => "label",
Def::SelfTy(..) => "self type",
Def::Macro(..) => "macro",
+ Def::GlobalAsm(..) => "global asm",
Def::Err => "unresolved item",
}
}
visitor.visit_id(item.id);
walk_list!(visitor, visit_foreign_item, &foreign_module.items);
}
+ ItemGlobalAsm(_) => {
+ visitor.visit_id(item.id);
+ }
ItemTy(ref typ, ref type_parameters) => {
visitor.visit_id(item.id);
visitor.visit_ty(typ);
}
}
+ fn lower_global_asm(&mut self, ga: &GlobalAsm) -> P<hir::GlobalAsm> {
+ P(hir::GlobalAsm {
+ asm: ga.asm,
+ ctxt: ga.ctxt,
+ })
+ }
+
fn lower_variant(&mut self, v: &Variant) -> hir::Variant {
Spanned {
node: hir::Variant_ {
}
ItemKind::Mod(ref m) => hir::ItemMod(self.lower_mod(m)),
ItemKind::ForeignMod(ref nm) => hir::ItemForeignMod(self.lower_foreign_mod(nm)),
+ ItemKind::GlobalAsm(ref ga) => hir::ItemGlobalAsm(self.lower_global_asm(ga)),
ItemKind::Ty(ref t, ref generics) => {
hir::ItemTy(self.lower_ty(t), self.lower_generics(generics))
}
//
// match <sub_expr> {
// <pat> => <body>,
- // [_ if <else_opt_if_cond> => <else_opt_if_body>,]
// _ => [<else_opt> | ()]
// }
arms.push(self.arm(hir_vec![pat], body_expr));
}
- // `[_ if <else_opt_if_cond> => <else_opt_if_body>,]`
- // `_ => [<else_opt> | ()]`
+ // _ => [<else_opt>|()]
{
- let mut current: Option<&Expr> = else_opt.as_ref().map(|p| &**p);
- let mut else_exprs: Vec<Option<&Expr>> = vec![current];
-
- // First, we traverse the AST and recursively collect all
- // `else` branches into else_exprs, e.g.:
- //
- // if let Some(_) = x {
- // ...
- // } else if ... { // Expr1
- // ...
- // } else if ... { // Expr2
- // ...
- // } else { // Expr3
- // ...
- // }
- //
- // ... results in else_exprs = [Some(&Expr1),
- // Some(&Expr2),
- // Some(&Expr3)]
- //
- // Because there also the case there is no `else`, these
- // entries can also be `None`, as in:
- //
- // if let Some(_) = x {
- // ...
- // } else if ... { // Expr1
- // ...
- // } else if ... { // Expr2
- // ...
- // }
- //
- // ... results in else_exprs = [Some(&Expr1),
- // Some(&Expr2),
- // None]
- //
- // The last entry in this list is always translated into
- // the final "unguard" wildcard arm of the `match`. In the
- // case of a `None`, it becomes `_ => ()`.
- loop {
- if let Some(e) = current {
- // There is an else branch at this level
- if let ExprKind::If(_, _, ref else_opt) = e.node {
- // The else branch is again an if-expr
- current = else_opt.as_ref().map(|p| &**p);
- else_exprs.push(current);
- } else {
- // The last item in the list is not an if-expr,
- // stop here
- break
- }
- } else {
- // We have no more else branch
- break
- }
- }
-
- // Now translate the list of nested else-branches into the
- // arms of the match statement.
- for else_expr in else_exprs {
- if let Some(else_expr) = else_expr {
- let (guard, body) = if let ExprKind::If(ref cond,
- ref then,
- _) = else_expr.node {
- let then = self.lower_block(then, false);
- (Some(cond),
- self.expr_block(then, ThinVec::new()))
- } else {
- (None,
- self.lower_expr(else_expr))
- };
-
- arms.push(hir::Arm {
- attrs: hir_vec![],
- pats: hir_vec![self.pat_wild(e.span)],
- guard: guard.map(|e| P(self.lower_expr(e))),
- body: P(body),
- });
- } else {
- // There was no else-branch, push a noop
- let pat_under = self.pat_wild(e.span);
- let unit = self.expr_tuple(e.span, hir_vec![]);
- arms.push(self.arm(hir_vec![pat_under], unit));
- }
- }
+ let wildcard_arm: Option<&Expr> = else_opt.as_ref().map(|p| &**p);
+ let wildcard_pattern = self.pat_wild(e.span);
+ let body = if let Some(else_expr) = wildcard_arm {
+ P(self.lower_expr(else_expr))
+ } else {
+ self.expr_tuple(e.span, hir_vec![])
+ };
+ arms.push(self.arm(hir_vec![wildcard_pattern], body));
}
let contains_else_clause = else_opt.is_some();
DefPathData::ValueNs(i.ident.name.as_str()),
ItemKind::MacroDef(..) => DefPathData::MacroDef(i.ident.name.as_str()),
ItemKind::Mac(..) => return self.visit_macro_invoc(i.id, false),
+ ItemKind::GlobalAsm(..) => DefPathData::Misc,
ItemKind::Use(ref view_path) => {
match view_path.node {
ViewPathGlob(..) => {}
self.local_def_id(self.body_owner(id))
}
+ /// Given a body owner's id, returns the `BodyId` associated with it.
+ pub fn body_owned_by(&self, id: NodeId) -> BodyId {
+ if let Some(entry) = self.find_entry(id) {
+ if let Some(body_id) = entry.associated_body() {
+ // For item-like things and closures, the associated
+ // body has its own distinct id, and that is returned
+ // by `associated_body`.
+ body_id
+ } else {
+ // For some expressions, the expression is its own body.
+ if let EntryExpr(_, expr) = entry {
+ BodyId { node_id: expr.id }
+ } else {
+ span_bug!(self.span(id), "id `{}` has no associated body", id);
+ }
+ }
+ } else {
+ bug!("no entry for id `{}`", id)
+ }
+ }
+
pub fn ty_param_owner(&self, id: NodeId) -> NodeId {
match self.get(id) {
NodeItem(&Item { node: ItemTrait(..), .. }) => id,
ItemFn(..) => "fn",
ItemMod(..) => "mod",
ItemForeignMod(..) => "foreign mod",
+ ItemGlobalAsm(..) => "global asm",
ItemTy(..) => "ty",
ItemEnum(..) => "enum",
ItemStruct(..) => "struct",
pub items: HirVec<ForeignItem>,
}
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub struct GlobalAsm {
+ pub asm: Symbol,
+ pub ctxt: SyntaxContext,
+}
+
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub struct EnumDef {
pub variants: HirVec<Variant>,
ItemMod(Mod),
/// An external module
ItemForeignMod(ForeignMod),
+ /// Module-level inline assembly (from global_asm!)
+ ItemGlobalAsm(P<GlobalAsm>),
/// A type alias, e.g. `type Foo = Bar<u8>`
ItemTy(P<Ty>, Generics),
/// An enum definition, e.g. `enum Foo<A, B> {C<A>, D<B>}`
ItemFn(..) => "function",
ItemMod(..) => "module",
ItemForeignMod(..) => "foreign module",
+ ItemGlobalAsm(..) => "global asm",
ItemTy(..) => "type alias",
ItemEnum(..) => "enum",
ItemStruct(..) => "struct",
self.print_foreign_mod(nmod, &item.attrs)?;
self.bclose(item.span)?;
}
+ hir::ItemGlobalAsm(ref ga) => {
+ self.head(&visibility_qualified(&item.vis, "global asm"))?;
+ word(&mut self.s, &ga.asm.as_str())?;
+ self.end()?
+ }
hir::ItemTy(ref ty, ref params) => {
self.ibox(indent_unit)?;
self.ibox(0)?;
use ich::{self, CachingCodemapView};
use session::config::DebugInfoLevel::NoDebugInfo;
use ty;
+use util::nodemap::NodeMap;
use std::hash as std_hash;
+use std::collections::{HashMap, HashSet};
use syntax::ast;
use syntax::attr;
}
}
}
+
+pub fn hash_stable_hashmap<'a, 'tcx, K, V, R, SK, F, W>(hcx: &mut StableHashingContext<'a, 'tcx>,
+ hasher: &mut StableHasher<W>,
+ map: &HashMap<K, V, R>,
+ extract_stable_key: F)
+ where K: Eq + std_hash::Hash,
+ V: HashStable<StableHashingContext<'a, 'tcx>>,
+ R: std_hash::BuildHasher,
+ SK: HashStable<StableHashingContext<'a, 'tcx>> + Ord + Clone,
+ F: Fn(&mut StableHashingContext<'a, 'tcx>, &K) -> SK,
+ W: StableHasherResult,
+{
+ let mut keys: Vec<_> = map.keys()
+ .map(|k| (extract_stable_key(hcx, k), k))
+ .collect();
+ keys.sort_unstable_by_key(|&(ref stable_key, _)| stable_key.clone());
+ keys.len().hash_stable(hcx, hasher);
+ for (stable_key, key) in keys {
+ stable_key.hash_stable(hcx, hasher);
+ map[key].hash_stable(hcx, hasher);
+ }
+}
+
+pub fn hash_stable_hashset<'a, 'tcx, K, R, SK, F, W>(hcx: &mut StableHashingContext<'a, 'tcx>,
+ hasher: &mut StableHasher<W>,
+ set: &HashSet<K, R>,
+ extract_stable_key: F)
+ where K: Eq + std_hash::Hash,
+ R: std_hash::BuildHasher,
+ SK: HashStable<StableHashingContext<'a, 'tcx>> + Ord + Clone,
+ F: Fn(&mut StableHashingContext<'a, 'tcx>, &K) -> SK,
+ W: StableHasherResult,
+{
+ let mut keys: Vec<_> = set.iter()
+ .map(|k| extract_stable_key(hcx, k))
+ .collect();
+ keys.sort_unstable();
+ keys.hash_stable(hcx, hasher);
+}
+
+pub fn hash_stable_nodemap<'a, 'tcx, V, W>(hcx: &mut StableHashingContext<'a, 'tcx>,
+ hasher: &mut StableHasher<W>,
+ map: &NodeMap<V>)
+ where V: HashStable<StableHashingContext<'a, 'tcx>>,
+ W: StableHasherResult,
+{
+ hash_stable_hashmap(hcx, hasher, map, |hcx, node_id| {
+ hcx.tcx.hir.definitions().node_to_hir_id(*node_id).local_id
+ });
+}
hir::ItemFn(..) |
hir::ItemMod(..) |
hir::ItemForeignMod(..) |
+ hir::ItemGlobalAsm(..) |
hir::ItemTy(..) |
hir::ItemEnum(..) |
hir::ItemStruct(..) |
ItemFn(fn_decl, unsafety, constness, abi, generics, body_id),
ItemMod(module),
ItemForeignMod(foreign_mod),
+ ItemGlobalAsm(global_asm),
ItemTy(ty, generics),
ItemEnum(enum_def, generics),
ItemStruct(variant_data, generics),
is_indirect
});
+impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::GlobalAsm {
+ fn hash_stable<W: StableHasherResult>(&self,
+ hcx: &mut StableHashingContext<'a, 'tcx>,
+ hasher: &mut StableHasher<W>) {
+ let hir::GlobalAsm {
+ asm,
+ ctxt: _
+ } = *self;
+
+ asm.hash_stable(hcx, hasher);
+ }
+}
+
impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::InlineAsm {
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a, 'tcx>,
Upvar(def_id, index, expr_id),
Label(node_id),
Macro(def_id, macro_kind),
+ GlobalAsm(def_id),
Err
});
impl_stable_hash_for!(enum mir::Mutability { Mut, Not });
impl_stable_hash_for!(enum mir::BorrowKind { Shared, Unique, Mut });
impl_stable_hash_for!(enum mir::LocalKind { Var, Temp, Arg, ReturnPointer });
-impl_stable_hash_for!(struct mir::LocalDecl<'tcx> { mutability, ty, name, source_info });
+impl_stable_hash_for!(struct mir::LocalDecl<'tcx> { mutability, ty, name, source_info,
+is_user_variable});
impl_stable_hash_for!(struct mir::UpvarDecl { debug_name, by_ref });
impl_stable_hash_for!(struct mir::BasicBlockData<'tcx> { statements, terminator, is_cleanup });
impl_stable_hash_for!(struct mir::Terminator<'tcx> { source_info, kind });
//! This module contains `HashStable` implementations for various data types
//! from rustc::ty in no particular order.
-use ich::StableHashingContext;
+use ich::{self, StableHashingContext, NodeIdHashingMode};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
StableHasherResult};
use std::hash as std_hash;
use std::mem;
+use syntax_pos::symbol::InternedString;
use ty;
-
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::Ty<'tcx> {
- fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
- hasher: &mut StableHasher<W>) {
- let type_hash = hcx.tcx().type_id_hash(*self);
- type_hash.hash_stable(hcx, hasher);
- }
-}
-
impl_stable_hash_for!(struct ty::ItemSubsts<'tcx> { substs });
-impl<'a, 'tcx, T> HashStable<StableHashingContext<'a, 'tcx>> for ty::Slice<T>
+impl<'a, 'tcx, T> HashStable<StableHashingContext<'a, 'tcx>> for &'tcx ty::Slice<T>
where T: HashStable<StableHashingContext<'a, 'tcx>> {
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a, 'tcx>,
hasher: &mut StableHasher<W>) {
- (&**self).hash_stable(hcx, hasher);
+ (&self[..]).hash_stable(hcx, hasher);
}
}
index.hash_stable(hcx, hasher);
name.hash_stable(hcx, hasher);
}
+ ty::ReScope(code_extent) => {
+ code_extent.hash_stable(hcx, hasher);
+ }
+ ty::ReFree(ref free_region) => {
+ free_region.hash_stable(hcx, hasher);
+ }
ty::ReLateBound(..) |
- ty::ReFree(..) |
- ty::ReScope(..) |
ty::ReVar(..) |
ty::ReSkolemized(..) => {
bug!("TypeIdHasher: unexpected region {:?}", *self)
MutBorrow
});
-
impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::UpvarCapture<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a, 'tcx>,
impl_stable_hash_for!(struct ty::TraitRef<'tcx> { def_id, substs });
impl_stable_hash_for!(struct ty::TraitPredicate<'tcx> { trait_ref });
impl_stable_hash_for!(tuple_struct ty::EquatePredicate<'tcx> { t1, t2 });
+impl_stable_hash_for!(struct ty::SubtypePredicate<'tcx> { a_is_expected, a, b });
impl<'a, 'tcx, A, B> HashStable<StableHashingContext<'a, 'tcx>> for ty::OutlivesPredicate<A, B>
where A: HashStable<StableHashingContext<'a, 'tcx>>,
ty::Predicate::Equate(ref pred) => {
pred.hash_stable(hcx, hasher);
}
+ ty::Predicate::Subtype(ref pred) => {
+ pred.hash_stable(hcx, hasher);
+ }
ty::Predicate::RegionOutlives(ref pred) => {
pred.hash_stable(hcx, hasher);
}
}
}
-
impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::AdtFlags {
fn hash_stable<W: StableHasherResult>(&self,
_: &mut StableHashingContext<'a, 'tcx>,
def_id.hash_stable(hcx, hasher);
substs.hash_stable(hcx, hasher);
}
- ConstVal::Struct(ref _name_value_map) => {
- // BTreeMap<ast::Name, ConstVal<'tcx>>),
- panic!("Ordering still unstable")
+ ConstVal::Struct(ref name_value_map) => {
+ let mut values: Vec<(InternedString, &ConstVal)> =
+ name_value_map.iter()
+ .map(|(name, val)| (name.as_str(), val))
+ .collect();
+
+ values.sort_unstable_by_key(|&(ref name, _)| name.clone());
+ values.hash_stable(hcx, hasher);
}
ConstVal::Tuple(ref value) => {
value.hash_stable(hcx, hasher);
impl_stable_hash_for!(struct ty::ClosureSubsts<'tcx> { substs });
-
impl_stable_hash_for!(struct ty::GenericPredicates<'tcx> {
parent,
predicates
impl_stable_hash_for!(struct ty::DebruijnIndex {
depth
});
+
+impl_stable_hash_for!(enum ty::cast::CastKind {
+ CoercionCast,
+ PtrPtrCast,
+ PtrAddrCast,
+ AddrPtrCast,
+ NumericCast,
+ EnumCast,
+ PrimIntCast,
+ U8CharCast,
+ ArrayPtrCast,
+ FnPtrPtrCast,
+ FnPtrAddrCast
+});
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ::middle::region::CodeExtent
+{
+ fn hash_stable<W: StableHasherResult>(&self,
+ hcx: &mut StableHashingContext<'a, 'tcx>,
+ hasher: &mut StableHasher<W>) {
+ hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
+ hcx.tcx().region_maps.code_extent_data(*self).hash_stable(hcx, hasher);
+ });
+ }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ::middle::region::CodeExtentData
+{
+ fn hash_stable<W: StableHasherResult>(&self,
+ hcx: &mut StableHashingContext<'a, 'tcx>,
+ hasher: &mut StableHasher<W>) {
+ use middle::region::CodeExtentData;
+
+ mem::discriminant(self).hash_stable(hcx, hasher);
+ match *self {
+ CodeExtentData::Misc(node_id) |
+ CodeExtentData::DestructionScope(node_id) => {
+ node_id.hash_stable(hcx, hasher);
+ }
+ CodeExtentData::CallSiteScope { fn_id, body_id } |
+ CodeExtentData::ParameterScope { fn_id, body_id } => {
+ fn_id.hash_stable(hcx, hasher);
+ body_id.hash_stable(hcx, hasher);
+ }
+ CodeExtentData::Remainder(block_remainder) => {
+ block_remainder.hash_stable(hcx, hasher);
+ }
+ }
+ }
+}
+
+impl_stable_hash_for!(struct ::middle::region::BlockRemainder {
+ block,
+ first_statement_index
+});
+
+impl_stable_hash_for!(struct ty::adjustment::CoerceUnsizedInfo {
+ custom_kind
+});
+
+impl_stable_hash_for!(struct ty::FreeRegion {
+ scope,
+ bound_region
+});
+
+impl_stable_hash_for!(enum ty::BoundRegion {
+ BrAnon(index),
+ BrNamed(def_id, name),
+ BrFresh(index),
+ BrEnv
+});
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::TypeVariants<'tcx>
+{
+ fn hash_stable<W: StableHasherResult>(&self,
+ hcx: &mut StableHashingContext<'a, 'tcx>,
+ hasher: &mut StableHasher<W>) {
+ use ty::TypeVariants::*;
+
+ mem::discriminant(self).hash_stable(hcx, hasher);
+ match *self {
+ TyBool |
+ TyChar |
+ TyStr |
+ TyNever => {
+ // Nothing more to hash.
+ }
+ TyInt(int_ty) => {
+ int_ty.hash_stable(hcx, hasher);
+ }
+ TyUint(uint_ty) => {
+ uint_ty.hash_stable(hcx, hasher);
+ }
+ TyFloat(float_ty) => {
+ float_ty.hash_stable(hcx, hasher);
+ }
+ TyAdt(adt_def, substs) => {
+ adt_def.hash_stable(hcx, hasher);
+ substs.hash_stable(hcx, hasher);
+ }
+ TyArray(inner_ty, len) => {
+ inner_ty.hash_stable(hcx, hasher);
+ len.hash_stable(hcx, hasher);
+ }
+ TySlice(inner_ty) => {
+ inner_ty.hash_stable(hcx, hasher);
+ }
+ TyRawPtr(pointee_ty) => {
+ pointee_ty.hash_stable(hcx, hasher);
+ }
+ TyRef(region, pointee_ty) => {
+ region.hash_stable(hcx, hasher);
+ pointee_ty.hash_stable(hcx, hasher);
+ }
+ TyFnDef(def_id, substs, ref sig) => {
+ def_id.hash_stable(hcx, hasher);
+ substs.hash_stable(hcx, hasher);
+ sig.hash_stable(hcx, hasher);
+ }
+ TyFnPtr(ref sig) => {
+ sig.hash_stable(hcx, hasher);
+ }
+ TyDynamic(ref existential_predicates, region) => {
+ existential_predicates.hash_stable(hcx, hasher);
+ region.hash_stable(hcx, hasher);
+ }
+ TyClosure(def_id, closure_substs) => {
+ def_id.hash_stable(hcx, hasher);
+ closure_substs.hash_stable(hcx, hasher);
+ }
+ TyTuple(inner_tys, from_diverging_type_var) => {
+ inner_tys.hash_stable(hcx, hasher);
+ from_diverging_type_var.hash_stable(hcx, hasher);
+ }
+ TyProjection(ref projection_ty) => {
+ projection_ty.hash_stable(hcx, hasher);
+ }
+ TyAnon(def_id, substs) => {
+ def_id.hash_stable(hcx, hasher);
+ substs.hash_stable(hcx, hasher);
+ }
+ TyParam(param_ty) => {
+ param_ty.hash_stable(hcx, hasher);
+ }
+
+ TyError |
+ TyInfer(..) => {
+ bug!("ty::TypeVariants::hash_stable() - Unexpected variant.")
+ }
+ }
+ }
+}
+
+impl_stable_hash_for!(struct ty::ParamTy {
+ idx,
+ name
+});
+
+impl_stable_hash_for!(struct ty::TypeAndMut<'tcx> {
+ ty,
+ mutbl
+});
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::ExistentialPredicate<'tcx>
+{
+ fn hash_stable<W: StableHasherResult>(&self,
+ hcx: &mut StableHashingContext<'a, 'tcx>,
+ hasher: &mut StableHasher<W>) {
+ mem::discriminant(self).hash_stable(hcx, hasher);
+ match *self {
+ ty::ExistentialPredicate::Trait(ref trait_ref) => {
+ trait_ref.hash_stable(hcx, hasher);
+ }
+ ty::ExistentialPredicate::Projection(ref projection) => {
+ projection.hash_stable(hcx, hasher);
+ }
+ ty::ExistentialPredicate::AutoTrait(def_id) => {
+ def_id.hash_stable(hcx, hasher);
+ }
+ }
+ }
+}
+
+impl_stable_hash_for!(struct ty::ExistentialTraitRef<'tcx> {
+ def_id,
+ substs
+});
+
+impl_stable_hash_for!(struct ty::ExistentialProjection<'tcx> {
+ trait_ref,
+ item_name,
+ ty
+});
+
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::TypeckTables<'tcx> {
+ fn hash_stable<W: StableHasherResult>(&self,
+ hcx: &mut StableHashingContext<'a, 'tcx>,
+ hasher: &mut StableHasher<W>) {
+ let ty::TypeckTables {
+ ref type_relative_path_defs,
+ ref node_types,
+ ref item_substs,
+ ref adjustments,
+ ref method_map,
+ ref upvar_capture_map,
+ ref closure_tys,
+ ref closure_kinds,
+ ref liberated_fn_sigs,
+ ref fru_field_types,
+
+ ref cast_kinds,
+
+ // FIXME(#41184): This is still ignored at the moment.
+ lints: _,
+ ref used_trait_imports,
+ tainted_by_errors,
+ ref free_region_map,
+ } = *self;
+
+ hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
+ ich::hash_stable_nodemap(hcx, hasher, type_relative_path_defs);
+ ich::hash_stable_nodemap(hcx, hasher, node_types);
+ ich::hash_stable_nodemap(hcx, hasher, item_substs);
+ ich::hash_stable_nodemap(hcx, hasher, adjustments);
+
+ ich::hash_stable_hashmap(hcx, hasher, method_map, |hcx, method_call| {
+ let ty::MethodCall {
+ expr_id,
+ autoderef
+ } = *method_call;
+
+ let def_id = hcx.tcx().hir.local_def_id(expr_id);
+ (hcx.def_path_hash(def_id), autoderef)
+ });
+
+ ich::hash_stable_hashmap(hcx, hasher, upvar_capture_map, |hcx, up_var_id| {
+ let ty::UpvarId {
+ var_id,
+ closure_expr_id
+ } = *up_var_id;
+
+ let var_def_id = hcx.tcx().hir.local_def_id(var_id);
+ let closure_def_id = hcx.tcx().hir.local_def_id(closure_expr_id);
+ (hcx.def_path_hash(var_def_id), hcx.def_path_hash(closure_def_id))
+ });
+
+ ich::hash_stable_nodemap(hcx, hasher, closure_tys);
+ ich::hash_stable_nodemap(hcx, hasher, closure_kinds);
+ ich::hash_stable_nodemap(hcx, hasher, liberated_fn_sigs);
+ ich::hash_stable_nodemap(hcx, hasher, fru_field_types);
+ ich::hash_stable_nodemap(hcx, hasher, cast_kinds);
+
+ ich::hash_stable_hashset(hcx, hasher, used_trait_imports, |hcx, def_id| {
+ hcx.def_path_hash(*def_id)
+ });
+
+ tainted_by_errors.hash_stable(hcx, hasher);
+ free_region_map.hash_stable(hcx, hasher);
+ })
+ }
+}
pub use self::fingerprint::Fingerprint;
pub use self::caching_codemap_view::CachingCodemapView;
-pub use self::hcx::{StableHashingContext, NodeIdHashingMode};
-
+pub use self::hcx::{StableHashingContext, NodeIdHashingMode, hash_stable_hashmap,
+ hash_stable_hashset, hash_stable_nodemap};
mod fingerprint;
mod caching_codemap_view;
mod hcx;
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Applies the "bivariance relationship" to two types and/or regions.
-//! If (A,B) are bivariant then either A <: B or B <: A. It occurs
-//! when type/lifetime parameters are unconstrained. Usually this is
-//! an error, but we permit it in the specific case where a type
-//! parameter is constrained in a where-clause via an associated type.
-//!
-//! There are several ways one could implement bivariance. You could
-//! just do nothing at all, for example, or you could fully verify
-//! that one of the two subtyping relationships hold. We choose to
-//! thread a middle line: we relate types up to regions, but ignore
-//! all region relationships.
-//!
-//! At one point, handling bivariance in this fashion was necessary
-//! for inference, but I'm actually not sure if that is true anymore.
-//! In particular, it might be enough to say (A,B) are bivariant for
-//! all (A,B).
-
-use super::combine::CombineFields;
-use super::type_variable::{BiTo};
-
-use ty::{self, Ty, TyCtxt};
-use ty::TyVar;
-use ty::relate::{Relate, RelateResult, TypeRelation};
-
-pub struct Bivariate<'combine, 'infcx: 'combine, 'gcx: 'infcx+'tcx, 'tcx: 'infcx> {
- fields: &'combine mut CombineFields<'infcx, 'gcx, 'tcx>,
- a_is_expected: bool,
-}
-
-impl<'combine, 'infcx, 'gcx, 'tcx> Bivariate<'combine, 'infcx, 'gcx, 'tcx> {
- pub fn new(fields: &'combine mut CombineFields<'infcx, 'gcx, 'tcx>, a_is_expected: bool)
- -> Bivariate<'combine, 'infcx, 'gcx, 'tcx>
- {
- Bivariate { fields: fields, a_is_expected: a_is_expected }
- }
-}
-
-impl<'combine, 'infcx, 'gcx, 'tcx> TypeRelation<'infcx, 'gcx, 'tcx>
- for Bivariate<'combine, 'infcx, 'gcx, 'tcx>
-{
- fn tag(&self) -> &'static str { "Bivariate" }
-
- fn tcx(&self) -> TyCtxt<'infcx, 'gcx, 'tcx> { self.fields.tcx() }
-
- fn a_is_expected(&self) -> bool { self.a_is_expected }
-
- fn relate_with_variance<T: Relate<'tcx>>(&mut self,
- variance: ty::Variance,
- a: &T,
- b: &T)
- -> RelateResult<'tcx, T>
- {
- match variance {
- // If we have Foo<A> and Foo is invariant w/r/t A,
- // and we want to assert that
- //
- // Foo<A> <: Foo<B> ||
- // Foo<B> <: Foo<A>
- //
- // then still A must equal B.
- ty::Invariant => self.relate(a, b),
-
- ty::Covariant => self.relate(a, b),
- ty::Bivariant => self.relate(a, b),
- ty::Contravariant => self.relate(a, b),
- }
- }
-
- fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
- debug!("{}.tys({:?}, {:?})", self.tag(),
- a, b);
- if a == b { return Ok(a); }
-
- let infcx = self.fields.infcx;
- let a = infcx.type_variables.borrow_mut().replace_if_possible(a);
- let b = infcx.type_variables.borrow_mut().replace_if_possible(b);
- match (&a.sty, &b.sty) {
- (&ty::TyInfer(TyVar(a_id)), &ty::TyInfer(TyVar(b_id))) => {
- infcx.type_variables.borrow_mut().relate_vars(a_id, BiTo, b_id);
- Ok(a)
- }
-
- (&ty::TyInfer(TyVar(a_id)), _) => {
- self.fields.instantiate(b, BiTo, a_id, self.a_is_expected)?;
- Ok(a)
- }
-
- (_, &ty::TyInfer(TyVar(b_id))) => {
- self.fields.instantiate(a, BiTo, b_id, self.a_is_expected)?;
- Ok(a)
- }
-
- _ => {
- self.fields.infcx.super_combine_tys(self, a, b)
- }
- }
- }
-
- fn regions(&mut self, a: &'tcx ty::Region, _: &'tcx ty::Region)
- -> RelateResult<'tcx, &'tcx ty::Region> {
- Ok(a)
- }
-
- fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
- -> RelateResult<'tcx, ty::Binder<T>>
- where T: Relate<'tcx>
- {
- let a1 = self.tcx().erase_late_bound_regions(a);
- let b1 = self.tcx().erase_late_bound_regions(b);
- let c = self.relate(&a1, &b1)?;
- Ok(ty::Binder(c))
- }
-}
// is also useful to track which value is the "expected" value in
// terms of error reporting.
-use super::bivariate::Bivariate;
use super::equate::Equate;
use super::glb::Glb;
use super::lub::Lub;
use super::sub::Sub;
use super::InferCtxt;
use super::{MiscVariable, TypeTrace};
-use super::type_variable::{RelationDir, BiTo, EqTo, SubtypeOf, SupertypeOf};
use ty::{IntType, UintType};
use ty::{self, Ty, TyCtxt};
use traits::PredicateObligations;
use syntax::ast;
-use syntax::util::small_vector::SmallVector;
use syntax_pos::Span;
#[derive(Clone)]
pub obligations: PredicateObligations<'tcx>,
}
+#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
+pub enum RelationDir {
+ SubtypeOf, SupertypeOf, EqTo
+}
+
impl<'infcx, 'gcx, 'tcx> InferCtxt<'infcx, 'gcx, 'tcx> {
pub fn super_combine_tys<R>(&self,
relation: &mut R,
Equate::new(self, a_is_expected)
}
- pub fn bivariate<'a>(&'a mut self, a_is_expected: bool) -> Bivariate<'a, 'infcx, 'gcx, 'tcx> {
- Bivariate::new(self, a_is_expected)
- }
-
pub fn sub<'a>(&'a mut self, a_is_expected: bool) -> Sub<'a, 'infcx, 'gcx, 'tcx> {
Sub::new(self, a_is_expected)
}
Glb::new(self, a_is_expected)
}
+ /// Here dir is either EqTo, SubtypeOf, or SupertypeOf. The
+ /// idea is that we should ensure that the type `a_ty` is equal
+ /// to, a subtype of, or a supertype of (respectively) the type
+ /// to which `b_vid` is bound.
+ ///
+ /// Since `b_vid` has not yet been instantiated with a type, we
+ /// will first instantiate `b_vid` with a *generalized* version
+ /// of `a_ty`. Generalization introduces other inference
+ /// variables wherever subtyping could occur.
pub fn instantiate(&mut self,
a_ty: Ty<'tcx>,
dir: RelationDir,
a_is_expected: bool)
-> RelateResult<'tcx, ()>
{
- // We use SmallVector here instead of Vec because this code is hot and
- // it's rare that the stack length exceeds 1.
- let mut stack = SmallVector::new();
- stack.push((a_ty, dir, b_vid));
- loop {
- // For each turn of the loop, we extract a tuple
- //
- // (a_ty, dir, b_vid)
- //
- // to relate. Here dir is either SubtypeOf or
- // SupertypeOf. The idea is that we should ensure that
- // the type `a_ty` is a subtype or supertype (respectively) of the
- // type to which `b_vid` is bound.
- //
- // If `b_vid` has not yet been instantiated with a type
- // (which is always true on the first iteration, but not
- // necessarily true on later iterations), we will first
- // instantiate `b_vid` with a *generalized* version of
- // `a_ty`. Generalization introduces other inference
- // variables wherever subtyping could occur (at time of
- // this writing, this means replacing free regions with
- // region variables).
- let (a_ty, dir, b_vid) = match stack.pop() {
- None => break,
- Some(e) => e,
- };
- // Get the actual variable that b_vid has been inferred to
- let (b_vid, b_ty) = {
- let mut variables = self.infcx.type_variables.borrow_mut();
- let b_vid = variables.root_var(b_vid);
- (b_vid, variables.probe_root(b_vid))
- };
-
- debug!("instantiate(a_ty={:?} dir={:?} b_vid={:?})",
- a_ty,
- dir,
- b_vid);
-
- // Check whether `vid` has been instantiated yet. If not,
- // make a generalized form of `ty` and instantiate with
- // that.
- let b_ty = match b_ty {
- Some(t) => t, // ...already instantiated.
- None => { // ...not yet instantiated:
- // Generalize type if necessary.
- let generalized_ty = match dir {
- EqTo => self.generalize(a_ty, b_vid, false),
- BiTo | SupertypeOf | SubtypeOf => self.generalize(a_ty, b_vid, true),
- }?;
- debug!("instantiate(a_ty={:?}, dir={:?}, \
- b_vid={:?}, generalized_ty={:?})",
- a_ty, dir, b_vid,
- generalized_ty);
- self.infcx.type_variables
- .borrow_mut()
- .instantiate_and_push(
- b_vid, generalized_ty, &mut stack);
- generalized_ty
- }
- };
-
- // The original triple was `(a_ty, dir, b_vid)` -- now we have
- // resolved `b_vid` to `b_ty`, so apply `(a_ty, dir, b_ty)`:
- //
- // FIXME(#16847): This code is non-ideal because all these subtype
- // relations wind up attributed to the same spans. We need
- // to associate causes/spans with each of the relations in
- // the stack to get this right.
- match dir {
- BiTo => self.bivariate(a_is_expected).relate(&a_ty, &b_ty),
- EqTo => self.equate(a_is_expected).relate(&a_ty, &b_ty),
- SubtypeOf => self.sub(a_is_expected).relate(&a_ty, &b_ty),
- SupertypeOf => self.sub(a_is_expected).relate_with_variance(
- ty::Contravariant, &a_ty, &b_ty),
- }?;
- }
+ use self::RelationDir::*;
+
+ // Get the actual variable that b_vid has been inferred to
+ debug_assert!(self.infcx.type_variables.borrow_mut().probe(b_vid).is_none());
+
+ debug!("instantiate(a_ty={:?} dir={:?} b_vid={:?})", a_ty, dir, b_vid);
+
+ // Generalize type of `a_ty` appropriately depending on the
+ // direction. As an example, assume:
+ //
+ // - `a_ty == &'x ?1`, where `'x` is some free region and `?1` is an
+ // inference variable,
+ // - and `dir` == `SubtypeOf`.
+ //
+ // Then the generalized form `b_ty` would be `&'?2 ?3`, where
+ // `'?2` and `?3` are fresh region/type inference
+ // variables. (Down below, we will relate `a_ty <: b_ty`,
+ // adding constraints like `'x: '?2` and `?1 <: ?3`.)
+ let b_ty = self.generalize(a_ty, b_vid, dir == EqTo)?;
+ debug!("instantiate(a_ty={:?}, dir={:?}, b_vid={:?}, generalized b_ty={:?})",
+ a_ty, dir, b_vid, b_ty);
+ self.infcx.type_variables.borrow_mut().instantiate(b_vid, b_ty);
+
+ // Finally, relate `b_ty` to `a_ty`, as described in previous comment.
+ //
+ // FIXME(#16847): This code is non-ideal because all these subtype
+ // relations wind up attributed to the same spans. We need
+ // to associate causes/spans with each of the relations in
+ // the stack to get this right.
+ match dir {
+ EqTo => self.equate(a_is_expected).relate(&a_ty, &b_ty),
+ SubtypeOf => self.sub(a_is_expected).relate(&a_ty, &b_ty),
+ SupertypeOf => self.sub(a_is_expected).relate_with_variance(
+ ty::Contravariant, &a_ty, &b_ty),
+ }?;
Ok(())
}
- /// Attempts to generalize `ty` for the type variable `for_vid`. This checks for cycle -- that
- /// is, whether the type `ty` references `for_vid`. If `make_region_vars` is true, it will also
- /// replace all regions with fresh variables. Returns `TyError` in the case of a cycle, `Ok`
+ /// Attempts to generalize `ty` for the type variable `for_vid`.
+ /// This checks for a cycle -- that is, whether the type `ty`
+ /// references `for_vid`. If `is_eq_relation` is false, it will
+ /// also replace all regions/unbound-type-variables with fresh
+ /// variables. Returns `TyError` in the case of a cycle, `Ok`
/// otherwise.
+ ///
+ /// Preconditions:
+ ///
+ /// - `for_vid` is a "root vid"
fn generalize(&self,
ty: Ty<'tcx>,
for_vid: ty::TyVid,
- make_region_vars: bool)
+ is_eq_relation: bool)
-> RelateResult<'tcx, Ty<'tcx>>
{
let mut generalize = Generalizer {
infcx: self.infcx,
span: self.trace.cause.span,
- for_vid: for_vid,
- make_region_vars: make_region_vars,
+ for_vid_sub_root: self.infcx.type_variables.borrow_mut().sub_root_var(for_vid),
+ is_eq_relation: is_eq_relation,
cycle_detected: false
};
let u = ty.fold_with(&mut generalize);
struct Generalizer<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> {
infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
span: Span,
- for_vid: ty::TyVid,
- make_region_vars: bool,
+ for_vid_sub_root: ty::TyVid,
+ is_eq_relation: bool,
cycle_detected: bool,
}
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
// Check to see whether the type we are genealizing references
- // `vid`. At the same time, also update any type variables to
- // the values that they are bound to. This is needed to truly
- // check for cycles, but also just makes things readable.
- //
- // (In particular, you could have something like `$0 = Box<$1>`
- // where `$1` has already been instantiated with `Box<$0>`)
+ // any other type variable related to `vid` via
+ // subtyping. This is basically our "occurs check", preventing
+ // us from creating infinitely sized types.
match t.sty {
ty::TyInfer(ty::TyVar(vid)) => {
let mut variables = self.infcx.type_variables.borrow_mut();
let vid = variables.root_var(vid);
- if vid == self.for_vid {
+ let sub_vid = variables.sub_root_var(vid);
+ if sub_vid == self.for_vid_sub_root {
+ // If sub-roots are equal, then `for_vid` and
+ // `vid` are related via subtyping.
self.cycle_detected = true;
self.tcx().types.err
} else {
drop(variables);
self.fold_ty(u)
}
- None => t,
+ None => {
+ if !self.is_eq_relation {
+ let origin = variables.origin(vid);
+ let new_var_id = variables.new_var(false, origin, None);
+ let u = self.tcx().mk_var(new_var_id);
+ debug!("generalize: replacing original vid={:?} with new={:?}",
+ vid, u);
+ u
+ } else {
+ t
+ }
+ }
}
}
}
ty::ReScope(..) |
ty::ReVar(..) |
ty::ReFree(..) => {
- if !self.make_region_vars {
+ if self.is_eq_relation {
return r;
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use super::combine::CombineFields;
+use super::combine::{CombineFields, RelationDir};
use super::{Subtype};
-use super::type_variable::{EqTo};
use ty::{self, Ty, TyCtxt};
use ty::TyVar;
let b = infcx.type_variables.borrow_mut().replace_if_possible(b);
match (&a.sty, &b.sty) {
(&ty::TyInfer(TyVar(a_id)), &ty::TyInfer(TyVar(b_id))) => {
- infcx.type_variables.borrow_mut().relate_vars(a_id, EqTo, b_id);
+ infcx.type_variables.borrow_mut().equate(a_id, b_id);
Ok(a)
}
(&ty::TyInfer(TyVar(a_id)), _) => {
- self.fields.instantiate(b, EqTo, a_id, self.a_is_expected)?;
+ self.fields.instantiate(b, RelationDir::EqTo, a_id, self.a_is_expected)?;
Ok(a)
}
(_, &ty::TyInfer(TyVar(b_id))) => {
- self.fields.instantiate(a, EqTo, b_id, self.a_is_expected)?;
+ self.fields.instantiate(a, RelationDir::EqTo, b_id, self.a_is_expected)?;
Ok(a)
}
use ty::{Region, Issue32330};
use ty::error::TypeError;
use syntax_pos::{Pos, Span};
-use errors::DiagnosticBuilder;
+use errors::{DiagnosticBuilder, DiagnosticStyledString};
mod note;
}
}
+ /// Given that `other_ty` is the same as a type argument for `name` in `sub`, populate `value`
+ /// highlighting `name` and every type argument that isn't at `pos` (which is `other_ty`), and
+ /// populate `other_value` with `other_ty`.
+ ///
+ /// ```text
+ /// Foo<Bar<Qux>>
+ /// ^^^^--------^ this is highlighted
+ /// | |
+ /// | this type argument is exactly the same as the other type, not highlighted
+ /// this is highlighted
+ /// Bar<Qux>
+ /// -------- this type is the same as a type argument in the other type, not highlighted
+ /// ```
+ fn highlight_outer(&self,
+ mut value: &mut DiagnosticStyledString,
+ mut other_value: &mut DiagnosticStyledString,
+ name: String,
+ sub: &ty::subst::Substs<'tcx>,
+ pos: usize,
+ other_ty: &ty::Ty<'tcx>) {
+ // `value` and `other_value` hold two incomplete type representations for display.
+ // `name` is the path of both types being compared. `sub` contains this type's type arguments.
+ value.push_highlighted(name);
+ let len = sub.len();
+ if len > 0 {
+ value.push_highlighted("<");
+ }
+
+ // Output the lifetimes for the first type
+ let lifetimes = sub.regions().map(|lifetime| {
+ let s = format!("{}", lifetime);
+ if s.is_empty() {
+ "'_".to_string()
+ } else {
+ s
+ }
+ }).collect::<Vec<_>>().join(", ");
+ if !lifetimes.is_empty() {
+ if sub.regions().count() < len {
+ value.push_normal(lifetimes + &", ");
+ } else {
+ value.push_normal(lifetimes);
+ }
+ }
+
+ // Highlight all the type arguments that aren't at `pos` and compare the type argument at
+ // `pos` and `other_ty`.
+ for (i, type_arg) in sub.types().enumerate() {
+ if i == pos {
+ let values = self.cmp(type_arg, other_ty);
+ value.0.extend((values.0).0);
+ other_value.0.extend((values.1).0);
+ } else {
+ value.push_highlighted(format!("{}", type_arg));
+ }
+
+ if len > 0 && i != len - 1 {
+ value.push_normal(", ");
+ }
+ //self.push_comma(&mut value, &mut other_value, len, i);
+ }
+ if len > 0 {
+ value.push_highlighted(">");
+ }
+ }
+
+ /// If `other_ty` is the same as a type argument present in `sub`, highlight `path` in `t1_out`,
+ /// as that is the difference to the other type.
+ ///
+ /// For the following code:
+ ///
+ /// ```no_run
+ /// let x: Foo<Bar<Qux>> = foo::<Bar<Qux>>();
+ /// ```
+ ///
+ /// The type error output will behave in the following way:
+ ///
+ /// ```text
+ /// Foo<Bar<Qux>>
+ /// ^^^^--------^ this is highlighted
+ /// | |
+ /// | this type argument is exactly the same as the other type, not highlighted
+ /// this is highlighted
+ /// Bar<Qux>
+ /// -------- this type is the same as a type argument in the other type, not highlighted
+ /// ```
+ fn cmp_type_arg(&self,
+ mut t1_out: &mut DiagnosticStyledString,
+ mut t2_out: &mut DiagnosticStyledString,
+ path: String,
+ sub: &ty::subst::Substs<'tcx>,
+ other_path: String,
+ other_ty: &ty::Ty<'tcx>) -> Option<()> {
+ for (i, ta) in sub.types().enumerate() {
+ if &ta == other_ty {
+ self.highlight_outer(&mut t1_out, &mut t2_out, path, sub, i, &other_ty);
+ return Some(());
+ }
+ if let &ty::TyAdt(def, _) = &ta.sty {
+ let path_ = self.tcx.item_path_str(def.did.clone());
+ if path_ == other_path {
+ self.highlight_outer(&mut t1_out, &mut t2_out, path, sub, i, &other_ty);
+ return Some(());
+ }
+ }
+ }
+ None
+ }
+
+ /// Add a `,` to the type representation only if it is appropriate.
+ fn push_comma(&self,
+ value: &mut DiagnosticStyledString,
+ other_value: &mut DiagnosticStyledString,
+ len: usize,
+ pos: usize) {
+ if len > 0 && pos != len - 1 {
+ value.push_normal(", ");
+ other_value.push_normal(", ");
+ }
+ }
+
+ /// Compare two given types, eliding parts that are the same between them and highlighting
+ /// relevant differences, and return two representations of those types for highlighted printing.
+ fn cmp(&self, t1: ty::Ty<'tcx>, t2: ty::Ty<'tcx>)
+ -> (DiagnosticStyledString, DiagnosticStyledString)
+ {
+ match (&t1.sty, &t2.sty) {
+ (&ty::TyAdt(def1, sub1), &ty::TyAdt(def2, sub2)) => {
+ let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
+ let path1 = self.tcx.item_path_str(def1.did.clone());
+ let path2 = self.tcx.item_path_str(def2.did.clone());
+ if def1.did == def2.did {
+ // Easy case. Replace same types with `_` to shorten the output and highlight
+ // the differing ones.
+ // let x: Foo<Bar, Qux> = y::<Foo<Quz, Qux>>();
+ // Foo<Bar, _>
+ // Foo<Quz, _>
+ // --- ^ type argument elided
+ // |
+ // highlighted in output
+ values.0.push_normal(path1);
+ values.1.push_normal(path2);
+
+ // Only draw `<...>` if there're lifetime/type arguments.
+ let len = sub1.len();
+ if len > 0 {
+ values.0.push_normal("<");
+ values.1.push_normal("<");
+ }
+
+ fn lifetime_display(lifetime: &Region) -> String {
+ let s = format!("{}", lifetime);
+ if s.is_empty() {
+ "'_".to_string()
+ } else {
+ s
+ }
+ }
+ // At one point we'd like to elide all lifetimes here, as they are irrelevant for
+ // all diagnostics that use this output
+ //
+ // Foo<'x, '_, Bar>
+ // Foo<'y, '_, Qux>
+ // ^^ ^^ --- type arguments are not elided
+ // | |
+ // | elided as they were the same
+ // not elided, they were different, but irrelevant
+ let lifetimes = sub1.regions().zip(sub2.regions());
+ for (i, lifetimes) in lifetimes.enumerate() {
+ let l1 = lifetime_display(lifetimes.0);
+ let l2 = lifetime_display(lifetimes.1);
+ if l1 == l2 {
+ values.0.push_normal("'_");
+ values.1.push_normal("'_");
+ } else {
+ values.0.push_highlighted(l1);
+ values.1.push_highlighted(l2);
+ }
+ self.push_comma(&mut values.0, &mut values.1, len, i);
+ }
+
+ // We're comparing two types with the same path, so we compare the type
+ // arguments for both. If they are the same, do not highlight and elide from the
+ // output.
+ // Foo<_, Bar>
+ // Foo<_, Qux>
+ // ^ elided type as this type argument was the same in both sides
+ let type_arguments = sub1.types().zip(sub2.types());
+ let regions_len = sub1.regions().collect::<Vec<_>>().len();
+ for (i, (ta1, ta2)) in type_arguments.enumerate() {
+ let i = i + regions_len;
+ if ta1 == ta2 {
+ values.0.push_normal("_");
+ values.1.push_normal("_");
+ } else {
+ let (x1, x2) = self.cmp(ta1, ta2);
+ (values.0).0.extend(x1.0);
+ (values.1).0.extend(x2.0);
+ }
+ self.push_comma(&mut values.0, &mut values.1, len, i);
+ }
+
+ // Close the type argument bracket.
+ // Only draw `<...>` if there're lifetime/type arguments.
+ if len > 0 {
+ values.0.push_normal(">");
+ values.1.push_normal(">");
+ }
+ values
+ } else {
+ // Check for case:
+ // let x: Foo<Bar<Qux> = foo::<Bar<Qux>>();
+ // Foo<Bar<Qux>
+ // ------- this type argument is exactly the same as the other type
+ // Bar<Qux>
+ if self.cmp_type_arg(&mut values.0,
+ &mut values.1,
+ path1.clone(),
+ sub1,
+ path2.clone(),
+ &t2).is_some() {
+ return values;
+ }
+ // Check for case:
+ // let x: Bar<Qux> = y:<Foo<Bar<Qux>>>();
+ // Bar<Qux>
+ // Foo<Bar<Qux>>
+ // ------- this type argument is exactly the same as the other type
+ if self.cmp_type_arg(&mut values.1,
+ &mut values.0,
+ path2,
+ sub2,
+ path1,
+ &t1).is_some() {
+ return values;
+ }
+
+ // We couldn't find anything in common, highlight everything.
+ // let x: Bar<Qux> = y::<Foo<Zar>>();
+ (DiagnosticStyledString::highlighted(format!("{}", t1)),
+ DiagnosticStyledString::highlighted(format!("{}", t2)))
+ }
+ }
+ _ => {
+ if t1 == t2 {
+ // The two types are the same, elide and don't highlight.
+ (DiagnosticStyledString::normal("_"), DiagnosticStyledString::normal("_"))
+ } else {
+ // We couldn't find anything in common, highlight everything.
+ (DiagnosticStyledString::highlighted(format!("{}", t1)),
+ DiagnosticStyledString::highlighted(format!("{}", t2)))
+ }
+ }
+ }
+ }
+
pub fn note_type_err(&self,
diag: &mut DiagnosticBuilder<'tcx>,
cause: &ObligationCause<'tcx>,
if let Some((expected, found)) = expected_found {
match (terr, is_simple_error, expected == found) {
- (&TypeError::Sorts(ref values), false, true) => {
+ (&TypeError::Sorts(ref values), false, true) => {
diag.note_expected_found_extra(
- &"type", &expected, &found,
+ &"type", expected, found,
&format!(" ({})", values.expected.sort_string(self.tcx)),
&format!(" ({})", values.found.sort_string(self.tcx)));
}
(_, false, _) => {
- diag.note_expected_found(&"type", &expected, &found);
+ diag.note_expected_found(&"type", expected, found);
}
_ => (),
}
diag
}
- /// Returns a string of the form "expected `{}`, found `{}`".
- fn values_str(&self, values: &ValuePairs<'tcx>) -> Option<(String, String)> {
+ fn values_str(&self, values: &ValuePairs<'tcx>)
+ -> Option<(DiagnosticStyledString, DiagnosticStyledString)>
+ {
match *values {
- infer::Types(ref exp_found) => self.expected_found_str(exp_found),
+ infer::Types(ref exp_found) => self.expected_found_str_ty(exp_found),
infer::TraitRefs(ref exp_found) => self.expected_found_str(exp_found),
infer::PolyTraitRefs(ref exp_found) => self.expected_found_str(exp_found),
}
}
+ fn expected_found_str_ty(&self,
+ exp_found: &ty::error::ExpectedFound<ty::Ty<'tcx>>)
+ -> Option<(DiagnosticStyledString, DiagnosticStyledString)> {
+ let exp_found = self.resolve_type_vars_if_possible(exp_found);
+ if exp_found.references_error() {
+ return None;
+ }
+
+ Some(self.cmp(exp_found.expected, exp_found.found))
+ }
+
+ /// Returns a string of the form "expected `{}`, found `{}`".
fn expected_found_str<T: fmt::Display + TypeFoldable<'tcx>>(
&self,
exp_found: &ty::error::ExpectedFound<T>)
- -> Option<(String, String)>
+ -> Option<(DiagnosticStyledString, DiagnosticStyledString)>
{
let exp_found = self.resolve_type_vars_if_possible(exp_found);
if exp_found.references_error() {
return None;
}
- Some((format!("{}", exp_found.expected), format!("{}", exp_found.found)))
+ Some((DiagnosticStyledString::highlighted(format!("{}", exp_found.expected)),
+ DiagnosticStyledString::highlighted(format!("{}", exp_found.found))))
}
fn report_generic_bound_failure(&self,
match *origin {
infer::Subtype(ref trace) => {
if let Some((expected, found)) = self.values_str(&trace.values) {
+ let expected = expected.content();
+ let found = found.content();
// FIXME: do we want a "the" here?
err.span_note(trace.cause.span,
&format!("...so that {} (expected {}, found {})",
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use ty::{self, TyCtxt};
+use infer::type_variable::TypeVariableMap;
+use ty::{self, Ty, TyCtxt};
use ty::fold::{TypeFoldable, TypeFolder};
use super::InferCtxt;
/// the actual types (`?T`, `Option<?T`) -- and remember that
/// after the snapshot is popped, the variable `?T` is no longer
/// unified.
- ///
- /// Assumptions:
- /// - no new type variables are created during `f()` (asserted
- /// below); this simplifies our logic since we don't have to
- /// check for escaping type variables
pub fn fudge_regions_if_ok<T, E, F>(&self,
origin: &RegionVariableOrigin,
f: F) -> Result<T, E> where
F: FnOnce() -> Result<T, E>,
T: TypeFoldable<'tcx>,
{
- let (region_vars, value) = self.probe(|snapshot| {
- let vars_at_start = self.type_variables.borrow().num_vars();
+ debug!("fudge_regions_if_ok(origin={:?})", origin);
+ let (type_variables, region_vars, value) = self.probe(|snapshot| {
match f() {
Ok(value) => {
let value = self.resolve_type_vars_if_possible(&value);
// At this point, `value` could in principle refer
- // to regions that have been created during the
- // snapshot (we assert below that `f()` does not
- // create any new type variables, so there
- // shouldn't be any of those). Once we exit
- // `probe()`, those are going to be popped, so we
- // will have to eliminate any references to them.
-
- assert_eq!(self.type_variables.borrow().num_vars(), vars_at_start,
- "type variables were created during fudge_regions_if_ok");
+ // to types/regions that have been created during
+ // the snapshot. Once we exit `probe()`, those are
+ // going to be popped, so we will have to
+ // eliminate any references to them.
+
+ let type_variables =
+ self.type_variables.borrow_mut().types_created_since_snapshot(
+ &snapshot.type_snapshot);
let region_vars =
self.region_vars.vars_created_since_snapshot(
&snapshot.region_vars_snapshot);
- Ok((region_vars, value))
+ Ok((type_variables, region_vars, value))
}
Err(e) => Err(e),
}
})?;
// At this point, we need to replace any of the now-popped
- // region variables that appear in `value` with a fresh region
- // variable. We can't do this during the probe because they
- // would just get popped then too. =)
+ // type/region variables that appear in `value` with a fresh
+ // variable of the appropriate kind. We can't do this during
+ // the probe because they would just get popped then too. =)
// Micro-optimization: if no variables have been created, then
// `value` can't refer to any of them. =) So we can just return it.
- if region_vars.is_empty() {
+ if type_variables.is_empty() && region_vars.is_empty() {
return Ok(value);
}
let mut fudger = RegionFudger {
infcx: self,
+ type_variables: &type_variables,
region_vars: ®ion_vars,
origin: origin
};
pub struct RegionFudger<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
+ type_variables: &'a TypeVariableMap,
region_vars: &'a Vec<ty::RegionVid>,
origin: &'a RegionVariableOrigin,
}
self.infcx.tcx
}
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ match ty.sty {
+ ty::TyInfer(ty::InferTy::TyVar(vid)) => {
+ match self.type_variables.get(&vid) {
+ None => {
+ // This variable was created before the
+ // "fudging". Since we refresh all type
+ // variables to their binding anyhow, we know
+ // that it is unbound, so we can just return
+ // it.
+ debug_assert!(self.infcx.type_variables.borrow_mut().probe(vid).is_none());
+ ty
+ }
+
+ Some(&origin) => {
+ // This variable was created during the
+ // fudging. Recreate it with a fresh variable
+ // here.
+ self.infcx.next_ty_var(origin)
+ }
+ }
+ }
+ _ => ty.super_fold_with(self),
+ }
+ }
+
fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region {
match *r {
ty::ReVar(v) if self.region_vars.contains(&v) => {
match variance {
ty::Invariant => self.fields.equate(self.a_is_expected).relate(a, b),
ty::Covariant => self.relate(a, b),
- ty::Bivariant => self.fields.bivariate(self.a_is_expected).relate(a, b),
+ // FIXME(#41044) -- not correct, need test
+ ty::Bivariant => Ok(a.clone()),
ty::Contravariant => self.fields.lub(self.a_is_expected).relate(a, b),
}
}
// Relates the type `v` to `a` and `b` such that `v` represents
// the LUB/GLB of `a` and `b` as appropriate.
+ //
+ // Subtle hack: ordering *may* be significant here. This method
+ // relates `v` to `a` first, which may help us to avoid unnecessary
+ // type variable obligations. See caller for details.
fn relate_bound(&mut self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()>;
}
Ok(v)
}
- (&ty::TyInfer(TyVar(..)), _) |
+ // If one side is known to be a variable and one is not,
+ // create a variable (`v`) to represent the LUB. Make sure to
+ // relate `v` to the non-type-variable first (by passing it
+ // first to `relate_bound`). Otherwise, we would produce a
+ // subtype obligation that must then be processed.
+ //
+ // Example: if the LHS is a type variable, and RHS is
+ // `Box<i32>`, then we currently compare `v` to the RHS first,
+ // which will instantiate `v` with `Box<i32>`. Then when `v`
+ // is compared to the LHS, we instantiate LHS with `Box<i32>`.
+ // But if we did in reverse order, we would create a `v <:
+ // LHS` (or vice versa) constraint and then instantiate
+ // `v`. This would require further processing to achieve the same
+ // end-result; in particular, this screws up some of the logic
+ // in coercion, which expects LUB to figure out that the LHS
+ // is (e.g.) `Box<i32>`. A more obvious solution might be to
+ // iterate on the subtype obligations that are returned, but I
+ // think this suffices. -nmatsakis
+ (&ty::TyInfer(TyVar(..)), _) => {
+ let v = infcx.next_ty_var(TypeVariableOrigin::LatticeVariable(this.cause().span));
+ this.relate_bound(v, b, a)?;
+ Ok(v)
+ }
(_, &ty::TyInfer(TyVar(..))) => {
let v = infcx.next_ty_var(TypeVariableOrigin::LatticeVariable(this.cause().span));
this.relate_bound(v, a, b)?;
match variance {
ty::Invariant => self.fields.equate(self.a_is_expected).relate(a, b),
ty::Covariant => self.relate(a, b),
- ty::Bivariant => self.fields.bivariate(self.a_is_expected).relate(a, b),
+ // FIXME(#41044) -- not correct, need test
+ ty::Bivariant => Ok(a.clone()),
ty::Contravariant => self.fields.glb(self.a_is_expected).relate(a, b),
}
}
use self::type_variable::TypeVariableOrigin;
use self::unify_key::ToType;
-mod bivariate;
mod combine;
mod equate;
pub mod error_reporting;
}
impl<T> ExpectedFound<T> {
- fn new(a_is_expected: bool, a: T, b: T) -> Self {
+ pub fn new(a_is_expected: bool, a: T, b: T) -> Self {
if a_is_expected {
ExpectedFound {expected: a, found: b}
} else {
self.probe(|_| {
let origin = &ObligationCause::dummy();
let trace = TypeTrace::types(origin, true, a, b);
- self.sub(true, trace, &a, &b).map(|InferOk { obligations, .. }| {
- // FIXME(#32730) propagate obligations
- assert!(obligations.is_empty());
+ self.sub(true, trace, &a, &b).map(|InferOk { obligations: _, .. }| {
+ // Ignore obligations, since we are unrolling
+ // everything anyway.
})
})
}
})
}
+ pub fn subtype_predicate(&self,
+ cause: &ObligationCause<'tcx>,
+ predicate: &ty::PolySubtypePredicate<'tcx>)
+ -> Option<InferResult<'tcx, ()>>
+ {
+ // Subtle: it's ok to skip the binder here and resolve because
+ // `shallow_resolve` just ignores anything that is not a type
+ // variable, and because type variables can't (at present, at
+ // least) capture any of the things bound by this binder.
+ //
+ // Really, there is no *particular* reason to do this
+ // `shallow_resolve` here except as a
+ // micro-optimization. Naturally I could not
+ // resist. -nmatsakis
+ let two_unbound_type_vars = {
+ let a = self.shallow_resolve(predicate.skip_binder().a);
+ let b = self.shallow_resolve(predicate.skip_binder().b);
+ a.is_ty_var() && b.is_ty_var()
+ };
+
+ if two_unbound_type_vars {
+ // Two unbound type variables? Can't make progress.
+ return None;
+ }
+
+ Some(self.commit_if_ok(|snapshot| {
+ let (ty::SubtypePredicate { a_is_expected, a, b}, skol_map) =
+ self.skolemize_late_bound_regions(predicate, snapshot);
+
+ let cause_span = cause.span;
+ let ok = self.sub_types(a_is_expected, cause, a, b)?;
+ self.leak_check(false, cause_span, &skol_map, snapshot)?;
+ self.pop_skolemized(skol_map, snapshot);
+ Ok(ok.unit())
+ }))
+ }
+
pub fn region_outlives_predicate(&self,
cause: &traits::ObligationCause<'tcx>,
predicate: &ty::PolyRegionOutlivesPredicate<'tcx>)
// except according to those terms.
use super::SubregionOrigin;
-use super::combine::CombineFields;
-use super::type_variable::{SubtypeOf, SupertypeOf};
+use super::combine::{CombineFields, RelationDir};
+use traits::Obligation;
use ty::{self, Ty, TyCtxt};
use ty::TyVar;
+use ty::fold::TypeFoldable;
use ty::relate::{Cause, Relate, RelateResult, TypeRelation};
use std::mem;
match variance {
ty::Invariant => self.fields.equate(self.a_is_expected).relate(a, b),
ty::Covariant => self.relate(a, b),
- ty::Bivariant => self.fields.bivariate(self.a_is_expected).relate(a, b),
+ ty::Bivariant => Ok(a.clone()),
ty::Contravariant => self.with_expected_switched(|this| { this.relate(b, a) }),
}
}
let a = infcx.type_variables.borrow_mut().replace_if_possible(a);
let b = infcx.type_variables.borrow_mut().replace_if_possible(b);
match (&a.sty, &b.sty) {
- (&ty::TyInfer(TyVar(a_id)), &ty::TyInfer(TyVar(b_id))) => {
- infcx.type_variables
- .borrow_mut()
- .relate_vars(a_id, SubtypeOf, b_id);
+ (&ty::TyInfer(TyVar(a_vid)), &ty::TyInfer(TyVar(b_vid))) => {
+ // Shouldn't have any LBR here, so we can safely put
+ // this under a binder below without fear of accidental
+ // capture.
+ assert!(!a.has_escaping_regions());
+ assert!(!b.has_escaping_regions());
+
+ // can't make progress on `A <: B` if both A and B are
+ // type variables, so record an obligation. We also
+ // have to record in the `type_variables` tracker that
+ // the two variables are equal modulo subtyping, which
+ // is important to the occurs check later on.
+ infcx.type_variables.borrow_mut().sub(a_vid, b_vid);
+ self.fields.obligations.push(
+ Obligation::new(
+ self.fields.trace.cause.clone(),
+ ty::Predicate::Subtype(
+ ty::Binder(ty::SubtypePredicate {
+ a_is_expected: self.a_is_expected,
+ a,
+ b,
+ }))));
+
Ok(a)
}
(&ty::TyInfer(TyVar(a_id)), _) => {
self.fields
- .instantiate(b, SupertypeOf, a_id, !self.a_is_expected)?;
+ .instantiate(b, RelationDir::SupertypeOf, a_id, !self.a_is_expected)?;
Ok(a)
}
(_, &ty::TyInfer(TyVar(b_id))) => {
- self.fields.instantiate(a, SubtypeOf, b_id, self.a_is_expected)?;
+ self.fields.instantiate(a, RelationDir::SubtypeOf, b_id, self.a_is_expected)?;
Ok(a)
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-pub use self::RelationDir::*;
use self::TypeVariableValue::*;
-use self::UndoEntry::*;
use hir::def_id::{DefId};
-use syntax::util::small_vector::SmallVector;
use syntax::ast;
use syntax_pos::Span;
use ty::{self, Ty};
use std::marker::PhantomData;
use std::mem;
use std::u32;
+use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::snapshot_vec as sv;
use rustc_data_structures::unify as ut;
pub struct TypeVariableTable<'tcx> {
values: sv::SnapshotVec<Delegate<'tcx>>,
+
+ /// Two variables are unified in `eq_relations` when we have a
+ /// constraint `?X == ?Y`.
eq_relations: ut::UnificationTable<ty::TyVid>,
+
+ /// Two variables are unified in `sub_relations` when we have a
+ /// constraint `?X <: ?Y` *or* a constraint `?Y <: ?X`. This second
+ /// table exists only to help with the occurs check. In particular,
+ /// we want to report constraints like these as an occurs check
+ /// violation:
+ ///
+ /// ?1 <: ?3
+ /// Box<?3> <: ?1
+ ///
+ /// This works because `?1` and `?3` are unified in the
+ /// `sub_relations` relation (not in `eq_relations`). Then when we
+ /// process the `Box<?3> <: ?1` constraint, we do an occurs check
+ /// on `Box<?3>` and find a potential cycle.
+ ///
+ /// This is reasonable because, in Rust, subtypes have the same
+ /// "skeleton" and hence there is no possible type such that
+ /// (e.g.) `Box<?3> <: ?3` for any `?3`.
+ sub_relations: ut::UnificationTable<ty::TyVid>,
}
/// Reasons to create a type inference variable
-#[derive(Debug)]
+#[derive(Copy, Clone, Debug)]
pub enum TypeVariableOrigin {
MiscVariable(Span),
NormalizeProjectionType(Span),
DivergingBlockExpr(Span),
DivergingFn(Span),
LatticeVariable(Span),
+ Generalized(ty::TyVid),
}
+pub type TypeVariableMap = FxHashMap<ty::TyVid, TypeVariableOrigin>;
+
struct TypeVariableData<'tcx> {
value: TypeVariableValue<'tcx>,
origin: TypeVariableOrigin,
enum TypeVariableValue<'tcx> {
Known(Ty<'tcx>),
Bounded {
- relations: Vec<Relation>,
default: Option<Default<'tcx>>
}
}
pub struct Snapshot {
snapshot: sv::Snapshot,
eq_snapshot: ut::Snapshot<ty::TyVid>,
+ sub_snapshot: ut::Snapshot<ty::TyVid>,
}
-enum UndoEntry<'tcx> {
- // The type of the var was specified.
- SpecifyVar(ty::TyVid, Vec<Relation>, Option<Default<'tcx>>),
- Relate(ty::TyVid, ty::TyVid),
- RelateRange(ty::TyVid, usize),
+struct Instantiate<'tcx> {
+ vid: ty::TyVid,
+ default: Option<Default<'tcx>>,
}
struct Delegate<'tcx>(PhantomData<&'tcx ()>);
-type Relation = (RelationDir, ty::TyVid);
-
-#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
-pub enum RelationDir {
- SubtypeOf, SupertypeOf, EqTo, BiTo
-}
-
-impl RelationDir {
- fn opposite(self) -> RelationDir {
- match self {
- SubtypeOf => SupertypeOf,
- SupertypeOf => SubtypeOf,
- EqTo => EqTo,
- BiTo => BiTo,
- }
- }
-}
-
impl<'tcx> TypeVariableTable<'tcx> {
pub fn new() -> TypeVariableTable<'tcx> {
TypeVariableTable {
values: sv::SnapshotVec::new(),
eq_relations: ut::UnificationTable::new(),
+ sub_relations: ut::UnificationTable::new(),
}
}
- fn relations<'a>(&'a mut self, a: ty::TyVid) -> &'a mut Vec<Relation> {
- relations(self.values.get_mut(a.index as usize))
- }
-
pub fn default(&self, vid: ty::TyVid) -> Option<Default<'tcx>> {
match &self.values.get(vid.index as usize).value {
&Known(_) => None,
&self.values.get(vid.index as usize).origin
}
- /// Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`.
+ /// Records that `a == b`.
///
/// Precondition: neither `a` nor `b` are known.
- pub fn relate_vars(&mut self, a: ty::TyVid, dir: RelationDir, b: ty::TyVid) {
- let a = self.root_var(a);
- let b = self.root_var(b);
- if a != b {
- if dir == EqTo {
- // a and b must be equal which we mark in the unification table
- let root = self.eq_relations.union(a, b);
- // In addition to being equal, all relations from the variable which is no longer
- // the root must be added to the root so they are not forgotten as the other
- // variable should no longer be referenced (other than to get the root)
- let other = if a == root { b } else { a };
- let count = {
- let (relations, root_relations) = if other.index < root.index {
- let (pre, post) = self.values.split_at_mut(root.index as usize);
- (relations(&mut pre[other.index as usize]), relations(&mut post[0]))
- } else {
- let (pre, post) = self.values.split_at_mut(other.index as usize);
- (relations(&mut post[0]), relations(&mut pre[root.index as usize]))
- };
- root_relations.extend_from_slice(relations);
- relations.len()
- };
- self.values.record(RelateRange(root, count));
- } else {
- self.relations(a).push((dir, b));
- self.relations(b).push((dir.opposite(), a));
- self.values.record(Relate(a, b));
- }
- }
+ pub fn equate(&mut self, a: ty::TyVid, b: ty::TyVid) {
+ debug_assert!(self.probe(a).is_none());
+ debug_assert!(self.probe(b).is_none());
+ self.eq_relations.union(a, b);
+ self.sub_relations.union(a, b);
}
- /// Instantiates `vid` with the type `ty` and then pushes an entry onto `stack` for each of the
- /// relations of `vid` to other variables. The relations will have the form `(ty, dir, vid1)`
- /// where `vid1` is some other variable id.
+ /// Records that `a <: b`.
///
- /// Precondition: `vid` must be a root in the unification table
- pub fn instantiate_and_push(
- &mut self,
- vid: ty::TyVid,
- ty: Ty<'tcx>,
- stack: &mut SmallVector<(Ty<'tcx>, RelationDir, ty::TyVid)>)
- {
- debug_assert!(self.root_var(vid) == vid);
- let old_value = {
- let value_ptr = &mut self.values.get_mut(vid.index as usize).value;
- mem::replace(value_ptr, Known(ty))
- };
+ /// Precondition: neither `a` nor `b` are known.
+ pub fn sub(&mut self, a: ty::TyVid, b: ty::TyVid) {
+ debug_assert!(self.probe(a).is_none());
+ debug_assert!(self.probe(b).is_none());
+ self.sub_relations.union(a, b);
+ }
+
+ /// Instantiates `vid` with the type `ty`.
+ ///
+ /// Precondition: `vid` must not have been previously instantiated.
+ pub fn instantiate(&mut self, vid: ty::TyVid, ty: Ty<'tcx>) {
+ let vid = self.root_var(vid);
+ debug_assert!(self.probe_root(vid).is_none());
- let (relations, default) = match old_value {
- Bounded { relations, default } => (relations, default),
- Known(_) => bug!("Asked to instantiate variable that is \
- already instantiated")
+ let old_value = {
+ let vid_data = &mut self.values[vid.index as usize];
+ mem::replace(&mut vid_data.value, TypeVariableValue::Known(ty))
};
- for &(dir, vid) in &relations {
- stack.push((ty, dir, vid));
+ match old_value {
+ TypeVariableValue::Bounded { default } => {
+ self.values.record(Instantiate { vid: vid, default: default });
+ }
+ TypeVariableValue::Known(old_ty) => {
+ bug!("instantiating type variable `{:?}` twice: new-value = {:?}, old-value={:?}",
+ vid, ty, old_ty)
+ }
}
-
- self.values.record(SpecifyVar(vid, relations, default));
}
pub fn new_var(&mut self,
default: Option<Default<'tcx>>,) -> ty::TyVid {
debug!("new_var(diverging={:?}, origin={:?})", diverging, origin);
self.eq_relations.new_key(());
+ self.sub_relations.new_key(());
let index = self.values.push(TypeVariableData {
- value: Bounded { relations: vec![], default: default },
+ value: Bounded { default: default },
origin: origin,
diverging: diverging
});
self.values.len()
}
+ /// Returns the "root" variable of `vid` in the `eq_relations`
+ /// equivalence table. All type variables that have been equated
+ /// will yield the same root variable (per the union-find
+ /// algorithm), so `root_var(a) == root_var(b)` implies that `a ==
+ /// b` (transitively).
pub fn root_var(&mut self, vid: ty::TyVid) -> ty::TyVid {
self.eq_relations.find(vid)
}
+ /// Returns the "root" variable of `vid` in the `sub_relations`
+ /// equivalence table. All type variables that have been
+ /// related via equality or subtyping will yield the same root
+ /// variable (per the union-find algorithm), so `sub_root_var(a)
+ /// == sub_root_var(b)` implies that:
+ ///
+ /// exists X. (a <: X || X <: a) && (b <: X || X <: b)
+ pub fn sub_root_var(&mut self, vid: ty::TyVid) -> ty::TyVid {
+ self.sub_relations.find(vid)
+ }
+
+ /// True if `a` and `b` have same "sub-root" (i.e., exists some
+ /// type X such that `forall i in {a, b}. (i <: X || X <: i)`).
+ pub fn sub_unified(&mut self, a: ty::TyVid, b: ty::TyVid) -> bool {
+ self.sub_root_var(a) == self.sub_root_var(b)
+ }
+
pub fn probe(&mut self, vid: ty::TyVid) -> Option<Ty<'tcx>> {
let vid = self.root_var(vid);
self.probe_root(vid)
}
+ pub fn origin(&self, vid: ty::TyVid) -> TypeVariableOrigin {
+ self.values.get(vid.index as usize).origin.clone()
+ }
+
/// Retrieves the type of `vid` given that it is currently a root in the unification table
pub fn probe_root(&mut self, vid: ty::TyVid) -> Option<Ty<'tcx>> {
debug_assert!(self.root_var(vid) == vid);
Snapshot {
snapshot: self.values.start_snapshot(),
eq_snapshot: self.eq_relations.snapshot(),
+ sub_snapshot: self.sub_relations.snapshot(),
}
}
}
});
- self.values.rollback_to(s.snapshot);
- self.eq_relations.rollback_to(s.eq_snapshot);
+ let Snapshot { snapshot, eq_snapshot, sub_snapshot } = s;
+ self.values.rollback_to(snapshot);
+ self.eq_relations.rollback_to(eq_snapshot);
+ self.sub_relations.rollback_to(sub_snapshot);
}
pub fn commit(&mut self, s: Snapshot) {
- self.values.commit(s.snapshot);
- self.eq_relations.commit(s.eq_snapshot);
+ let Snapshot { snapshot, eq_snapshot, sub_snapshot } = s;
+ self.values.commit(snapshot);
+ self.eq_relations.commit(eq_snapshot);
+ self.sub_relations.commit(sub_snapshot);
+ }
+
+ /// Returns a map `{V1 -> V2}`, where the keys `{V1}` are
+ /// ty-variables created during the snapshot, and the values
+ /// `{V2}` are the root variables that they were unified with,
+ /// along with their origin.
+ pub fn types_created_since_snapshot(&mut self, s: &Snapshot) -> TypeVariableMap {
+ let actions_since_snapshot = self.values.actions_since_snapshot(&s.snapshot);
+
+ actions_since_snapshot
+ .iter()
+ .filter_map(|action| match action {
+ &sv::UndoLog::NewElem(index) => Some(ty::TyVid { index: index as u32 }),
+ _ => None,
+ })
+ .map(|vid| {
+ let origin = self.values.get(vid.index as usize).origin.clone();
+ (vid, origin)
+ })
+ .collect()
}
pub fn types_escaping_snapshot(&mut self, s: &Snapshot) -> Vec<Ty<'tcx>> {
debug!("NewElem({}) new_elem_threshold={}", index, new_elem_threshold);
}
- sv::UndoLog::Other(SpecifyVar(vid, ..)) => {
+ sv::UndoLog::Other(Instantiate { vid, .. }) => {
if vid.index < new_elem_threshold {
// quick check to see if this variable was
// created since the snapshot started or not.
impl<'tcx> sv::SnapshotVecDelegate for Delegate<'tcx> {
type Value = TypeVariableData<'tcx>;
- type Undo = UndoEntry<'tcx>;
-
- fn reverse(values: &mut Vec<TypeVariableData<'tcx>>, action: UndoEntry<'tcx>) {
- match action {
- SpecifyVar(vid, relations, default) => {
- values[vid.index as usize].value = Bounded {
- relations: relations,
- default: default
- };
- }
+ type Undo = Instantiate<'tcx>;
- Relate(a, b) => {
- relations(&mut (*values)[a.index as usize]).pop();
- relations(&mut (*values)[b.index as usize]).pop();
- }
-
- RelateRange(i, n) => {
- let relations = relations(&mut (*values)[i.index as usize]);
- for _ in 0..n {
- relations.pop();
- }
- }
- }
- }
-}
-
-fn relations<'a>(v: &'a mut TypeVariableData) -> &'a mut Vec<Relation> {
- match v.value {
- Known(_) => bug!("var_sub_var: variable is known"),
- Bounded { ref mut relations, .. } => relations
+ fn reverse(values: &mut Vec<TypeVariableData<'tcx>>, action: Instantiate<'tcx>) {
+ let Instantiate { vid, default } = action;
+ values[vid.index as usize].value = Bounded {
+ default: default
+ };
}
}
#![feature(i128_type)]
#![feature(libc)]
#![feature(loop_break_value)]
+#![feature(never_type)]
#![feature(nonzero)]
#![cfg_attr(stage0, feature(pub_restricted))]
#![feature(quote)]
#![feature(staged_api)]
#![feature(unboxed_closures)]
#![feature(discriminant_value)]
+#![feature(sort_unstable)]
extern crate arena;
extern crate core;
use hir::map as hir_map;
use hir::map::definitions::{Definitions, DefKey, DisambiguatedDefPathData};
use hir::svh::Svh;
+use ich;
use middle::lang_items;
use ty::{self, TyCtxt};
use session::Session;
#[derive(Clone, Debug)]
pub struct LinkMeta {
- pub crate_name: Symbol,
pub crate_hash: Svh,
}
pub path_len: usize,
}
+pub struct EncodedMetadata {
+ pub raw_data: Vec<u8>,
+ pub hashes: Vec<EncodedMetadataHash>,
+}
+
+/// The hash for some metadata that (when saving) will be exported
+/// from this crate, or which (when importing) was exported by an
+/// upstream crate.
+#[derive(Debug, RustcEncodable, RustcDecodable, Copy, Clone)]
+pub struct EncodedMetadataHash {
+ pub def_index: DefIndex,
+ pub hash: ich::Fingerprint,
+}
+
/// A store of Rust crates, through with their metadata
/// can be accessed.
pub trait CrateStore {
fn encode_metadata<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
link_meta: &LinkMeta,
- reachable: &NodeSet) -> Vec<u8>;
+ reachable: &NodeSet)
+ -> EncodedMetadata;
fn metadata_encoding_version(&self) -> &[u8];
}
fn encode_metadata<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
link_meta: &LinkMeta,
- reachable: &NodeSet) -> Vec<u8> { vec![] }
+ reachable: &NodeSet)
+ -> EncodedMetadata {
+ bug!("encode_metadata")
+ }
fn metadata_encoding_version(&self) -> &[u8] { bug!("metadata_encoding_version") }
}
hir::ItemStruct(..) | hir::ItemUnion(..) => {
let def_id = self.tcx.hir.local_def_id(item.id);
let def = self.tcx.lookup_adt_def(def_id);
- self.struct_has_extern_repr = def.repr.c;
+ self.struct_has_extern_repr = def.repr.c();
intravisit::walk_item(self, &item);
}
//! `unsafe`.
use self::RootUnsafeContext::*;
-use dep_graph::DepNode;
use ty::{self, Ty, TyCtxt};
use ty::MethodCall;
use lint;
}
pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
- let _task = tcx.dep_graph.in_task(DepNode::EffectCheck);
-
let mut visitor = EffectCheckVisitor {
tcx: tcx,
tables: &ty::TypeckTables::empty(),
ty::Predicate::Projection(..) |
ty::Predicate::Trait(..) |
ty::Predicate::Equate(..) |
+ ty::Predicate::Subtype(..) |
ty::Predicate::WellFormed(..) |
ty::Predicate::ObjectSafe(..) |
ty::Predicate::ClosureKind(..) |
map.relate_free_regions(frs[1], frs[2]);
assert_eq!(map.lub_free_regions(frs[0], frs[1]), ty::ReFree(frs[2]));
}
+
+impl_stable_hash_for!(struct FreeRegionMap {
+ relation
+});
_ => return ty
};
- if def.variants.len() == 2 && !def.repr.c && def.repr.int.is_none() {
+ if def.variants.len() == 2 && !def.repr.c() && def.repr.int.is_none() {
let data_idx;
if def.variants[0].fields.is_empty() {
use self::LiveNodeKind::*;
use self::VarKind::*;
-use dep_graph::DepNode;
use hir::def::*;
use ty::{self, TyCtxt, ParameterEnvironment};
use traits::{self, Reveal};
}
pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
- let _task = tcx.dep_graph.in_task(DepNode::Liveness);
tcx.hir.krate().visit_all_item_likes(&mut IrMaps::new(tcx).as_deep_visitor());
tcx.sess.abort_if_errors();
}
hir::ItemMod(..) | hir::ItemForeignMod(..) |
hir::ItemImpl(..) | hir::ItemTrait(..) |
hir::ItemStruct(..) | hir::ItemEnum(..) |
- hir::ItemUnion(..) | hir::ItemDefaultImpl(..) => {}
+ hir::ItemUnion(..) | hir::ItemDefaultImpl(..) |
+ hir::ItemGlobalAsm(..) => {}
}
}
hir_map::NodeTraitItem(trait_method) => {
hir::ItemUse(..) |
hir::ItemMod(..) |
hir::ItemDefaultImpl(..) |
- hir::ItemForeignMod(..) => {
+ hir::ItemForeignMod(..) |
+ hir::ItemGlobalAsm(..) => {
// These sorts of items have no lifetime parameters at all.
intravisit::walk_item(self, item);
}
pub fn temps_iter<'a>(&'a self) -> impl Iterator<Item=Local> + 'a {
(self.arg_count+1..self.local_decls.len()).filter_map(move |index| {
let local = Local::new(index);
- if self.local_decls[local].source_info.is_none() {
- Some(local)
- } else {
+ if self.local_decls[local].is_user_variable {
None
+ } else {
+ Some(local)
}
})
}
pub fn vars_iter<'a>(&'a self) -> impl Iterator<Item=Local> + 'a {
(self.arg_count+1..self.local_decls.len()).filter_map(move |index| {
let local = Local::new(index);
- if self.local_decls[local].source_info.is_none() {
- None
- } else {
+ if self.local_decls[local].is_user_variable {
Some(local)
+ } else {
+ None
}
})
}
/// Temporaries and the return pointer are always mutable.
pub mutability: Mutability,
+ /// True if this corresponds to a user-declared local variable.
+ pub is_user_variable: bool,
+
/// Type of this local.
pub ty: Ty<'tcx>,
/// to generate better debuginfo.
pub name: Option<Name>,
- /// For user-declared variables, stores their source information.
- ///
- /// For temporaries, this is `None`.
- ///
- /// This is the primary way to differentiate between user-declared
- /// variables and compiler-generated temporaries.
- pub source_info: Option<SourceInfo>,
+ /// Source info of the local.
+ pub source_info: SourceInfo,
}
impl<'tcx> LocalDecl<'tcx> {
/// Create a new `LocalDecl` for a temporary.
#[inline]
- pub fn new_temp(ty: Ty<'tcx>) -> Self {
+ pub fn new_temp(ty: Ty<'tcx>, span: Span) -> Self {
LocalDecl {
mutability: Mutability::Mut,
ty: ty,
name: None,
- source_info: None,
+ source_info: SourceInfo {
+ span: span,
+ scope: ARGUMENT_VISIBILITY_SCOPE
+ },
+ is_user_variable: false
}
}
///
/// This must be inserted into the `local_decls` list as the first local.
#[inline]
- pub fn new_return_pointer(return_ty: Ty) -> LocalDecl {
+ pub fn new_return_pointer(return_ty: Ty, span: Span) -> LocalDecl {
LocalDecl {
mutability: Mutability::Mut,
ty: return_ty,
- source_info: None,
+ source_info: SourceInfo {
+ span: span,
+ scope: ARGUMENT_VISIBILITY_SCOPE
+ },
name: None, // FIXME maybe we do want some name here?
+ is_user_variable: false
}
}
}
ref $($mutability)* ty,
name: _,
ref $($mutability)* source_info,
+ is_user_variable: _,
} = *local_decl;
self.visit_ty(ty);
- if let Some(ref $($mutability)* info) = *source_info {
- self.visit_source_info(info);
- }
+ self.visit_source_info(source_info);
}
fn super_visibility_scope(&mut self,
Some("one of: `address`, `leak`, `memory` or `thread`");
pub const parse_linker_flavor: Option<&'static str> =
Some(::rustc_back::LinkerFlavor::one_of());
+ pub const parse_optimization_fuel: Option<&'static str> =
+ Some("crate=integer");
}
#[allow(dead_code)]
}
true
}
+
+ fn parse_optimization_fuel(slot: &mut Option<(String, u64)>, v: Option<&str>) -> bool {
+ match v {
+ None => false,
+ Some(s) => {
+ let parts = s.split('=').collect::<Vec<_>>();
+ if parts.len() != 2 { return false; }
+ let crate_name = parts[0].to_string();
+ let fuel = parts[1].parse::<u64>();
+ if fuel.is_err() { return false; }
+ *slot = Some((crate_name, fuel.unwrap()));
+ true
+ }
+ }
+ }
}
) }
"Use a sanitizer"),
linker_flavor: Option<LinkerFlavor> = (None, parse_linker_flavor, [UNTRACKED],
"Linker flavor"),
+ fuel: Option<(String, u64)> = (None, parse_optimization_fuel, [TRACKED],
+ "Set the optimization fuel quota for a crate."),
+ print_fuel: Option<String> = (None, parse_opt_string, [TRACKED],
+ "Make Rustc print the total optimization fuel used by a crate."),
}
pub fn default_lib_output() -> CrateType {
"NAME"),
opt::multi_s("", "emit", "Comma separated list of types of output for \
the compiler to emit",
- "[asm|llvm-bc|llvm-ir|obj|metadata|link|dep-info]"),
+ "[asm|llvm-bc|llvm-ir|obj|metadata|link|dep-info|mir]"),
opt::multi_s("", "print", "Comma separated list of compiler information to \
print on stdout", &format!("[{}]",
&print_opts.join("|"))),
impl_dep_tracking_hash_via_hash!(bool);
impl_dep_tracking_hash_via_hash!(usize);
+ impl_dep_tracking_hash_via_hash!(u64);
impl_dep_tracking_hash_via_hash!(String);
impl_dep_tracking_hash_via_hash!(lint::Level);
impl_dep_tracking_hash_via_hash!(Option<bool>);
impl_dep_tracking_hash_via_hash!(Option<usize>);
impl_dep_tracking_hash_via_hash!(Option<String>);
+ impl_dep_tracking_hash_via_hash!(Option<(String, u64)>);
impl_dep_tracking_hash_via_hash!(Option<PanicStrategy>);
impl_dep_tracking_hash_via_hash!(Option<lint::Level>);
impl_dep_tracking_hash_via_hash!(Option<PathBuf>);
impl_dep_tracking_hash_for_sortable_vec_of!((String, lint::Level));
impl_dep_tracking_hash_for_sortable_vec_of!((String, Option<String>,
Option<cstore::NativeLibraryKind>));
+ impl_dep_tracking_hash_for_sortable_vec_of!((String, u64));
impl DepTrackingHash for SearchPaths {
fn hash(&self, hasher: &mut DefaultHasher, _: ErrorOutputType) {
let mut elems: Vec<_> = self
use dep_graph::DepGraph;
use hir::def_id::{CrateNum, DefIndex};
-use hir::svh::Svh;
use lint;
use middle::cstore::CrateStore;
use middle::dependency_format;
pub code_stats: RefCell<CodeStats>,
next_node_id: Cell<ast::NodeId>,
+
+ /// If -zfuel=crate=n is specified, Some(crate).
+ optimization_fuel_crate: Option<String>,
+ /// If -zfuel=crate=n is specified, initially set to n. Otherwise 0.
+ optimization_fuel_limit: Cell<u64>,
+ /// We're rejecting all further optimizations.
+ out_of_fuel: Cell<bool>,
+
+ // The next two are public because the driver needs to read them.
+
+ /// If -zprint-fuel=crate, Some(crate).
+ pub print_fuel_crate: Option<String>,
+ /// Always set to zero and incremented so that we can print fuel expended by a crate.
+ pub print_fuel: Cell<u64>,
}
pub struct PerfStats {
/// Returns the symbol name for the registrar function,
/// given the crate Svh and the function DefIndex.
- pub fn generate_plugin_registrar_symbol(&self, svh: &Svh, index: DefIndex)
+ pub fn generate_plugin_registrar_symbol(&self, disambiguator: Symbol, index: DefIndex)
-> String {
- format!("__rustc_plugin_registrar__{}_{}", svh, index.as_usize())
+ format!("__rustc_plugin_registrar__{}_{}", disambiguator, index.as_usize())
}
- pub fn generate_derive_registrar_symbol(&self,
- svh: &Svh,
- index: DefIndex) -> String {
- format!("__rustc_derive_registrar__{}_{}", svh, index.as_usize())
+ pub fn generate_derive_registrar_symbol(&self, disambiguator: Symbol, index: DefIndex)
+ -> String {
+ format!("__rustc_derive_registrar__{}_{}", disambiguator, index.as_usize())
}
pub fn sysroot<'a>(&'a self) -> &'a Path {
println!("Total time spent decoding DefPath tables: {}",
duration_to_secs_str(self.perf_stats.decode_def_path_tables_time.get()));
}
+
+ /// We want to know if we're allowed to do an optimization for crate foo from -z fuel=foo=n.
+ /// This expends fuel if applicable, and records fuel if applicable.
+ pub fn consider_optimizing<T: Fn() -> String>(&self, crate_name: &str, msg: T) -> bool {
+ let mut ret = true;
+ match self.optimization_fuel_crate {
+ Some(ref c) if c == crate_name => {
+ let fuel = self.optimization_fuel_limit.get();
+ ret = fuel != 0;
+ if fuel == 0 && !self.out_of_fuel.get() {
+ println!("optimization-fuel-exhausted: {}", msg());
+ self.out_of_fuel.set(true);
+ } else if fuel > 0 {
+ self.optimization_fuel_limit.set(fuel-1);
+ }
+ }
+ _ => {}
+ }
+ match self.print_fuel_crate {
+ Some(ref c) if c == crate_name=> {
+ self.print_fuel.set(self.print_fuel.get()+1);
+ },
+ _ => {}
+ }
+ ret
+ }
}
pub fn build_session(sopts: config::Options,
}
);
+ let optimization_fuel_crate = sopts.debugging_opts.fuel.as_ref().map(|i| i.0.clone());
+ let optimization_fuel_limit = Cell::new(sopts.debugging_opts.fuel.as_ref()
+ .map(|i| i.1).unwrap_or(0));
+ let print_fuel_crate = sopts.debugging_opts.print_fuel.clone();
+ let print_fuel = Cell::new(0);
+
let sess = Session {
dep_graph: dep_graph.clone(),
target: target_cfg,
decode_def_path_tables_time: Cell::new(Duration::from_secs(0)),
},
code_stats: RefCell::new(CodeStats::new()),
+ optimization_fuel_crate: optimization_fuel_crate,
+ optimization_fuel_limit: optimization_fuel_limit,
+ print_fuel_crate: print_fuel_crate,
+ print_fuel: print_fuel,
+ out_of_fuel: Cell::new(false),
};
init_llvm(&sess);
use ty::fast_reject;
use ty::fold::TypeFolder;
use ty::subst::Subst;
+use ty::SubtypePredicate;
use util::nodemap::{FxHashMap, FxHashSet};
use syntax_pos::{DUMMY_SP, Span};
found_pattern: Option<&'a Pat>,
}
+impl<'a, 'gcx, 'tcx> FindLocalByTypeVisitor<'a, 'gcx, 'tcx> {
+ fn is_match(&self, ty: Ty<'tcx>) -> bool {
+ ty == *self.target_ty || match (&ty.sty, &self.target_ty.sty) {
+ (&ty::TyInfer(ty::TyVar(a_vid)), &ty::TyInfer(ty::TyVar(b_vid))) =>
+ self.infcx.type_variables
+ .borrow_mut()
+ .sub_unified(a_vid, b_vid),
+
+ _ => false,
+ }
+ }
+}
+
impl<'a, 'gcx, 'tcx> Visitor<'a> for FindLocalByTypeVisitor<'a, 'gcx, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'a> {
NestedVisitorMap::None
fn visit_local(&mut self, local: &'a Local) {
if let Some(&ty) = self.infcx.tables.borrow().node_types.get(&local.id) {
let ty = self.infcx.resolve_type_vars_if_possible(&ty);
- let is_match = ty.walk().any(|t| t == *self.target_ty);
+ let is_match = ty.walk().any(|t| self.is_match(t));
if is_match && self.found_pattern.is_none() {
self.found_pattern = Some(&*local.pat);
FulfillmentErrorCode::CodeAmbiguity => {
self.maybe_report_ambiguity(&error.obligation);
}
+ FulfillmentErrorCode::CodeSubtypeError(ref expected_found, ref err) => {
+ self.report_mismatched_types(&error.obligation.cause,
+ expected_found.expected,
+ expected_found.found,
+ err.clone())
+ .emit();
+ }
}
}
err
}
+ ty::Predicate::Subtype(ref predicate) => {
+ // Errors for Subtype predicates show up as
+ // `FulfillmentErrorCode::CodeSubtypeError`,
+ // not selection error.
+ span_bug!(span, "subtype requirement gave wrong error: `{:?}`", predicate)
+ }
+
ty::Predicate::Equate(ref predicate) => {
let predicate = self.resolve_type_vars_if_possible(predicate);
let err = self.equality_predicate(&obligation.cause,
}
}
+ ty::Predicate::Subtype(ref data) => {
+ if data.references_error() || self.tcx.sess.has_errors() {
+ // no need to overwhelm the user in such cases
+ } else {
+ let &SubtypePredicate { a_is_expected: _, a, b } = data.skip_binder();
+ // both must be type variables, or the other would've been instantiated
+ assert!(a.is_ty_var() && b.is_ty_var());
+ self.need_type_info(obligation, a);
+ }
+ }
+
_ => {
if !self.tcx.sess.has_errors() {
let mut err = struct_span_err!(self.tcx.sess,
use dep_graph::DepGraph;
use infer::{InferCtxt, InferOk};
use ty::{self, Ty, TypeFoldable, ToPolyTraitRef, TyCtxt, ToPredicate};
+use ty::error::ExpectedFound;
use rustc_data_structures::obligation_forest::{ObligationForest, Error};
use rustc_data_structures::obligation_forest::{ForestObligation, ObligationProcessor};
use std::marker::PhantomData;
s => Ok(s)
}
}
+
+ ty::Predicate::Subtype(ref subtype) => {
+ match selcx.infcx().subtype_predicate(&obligation.cause, subtype) {
+ None => {
+ // none means that both are unresolved
+ pending_obligation.stalled_on = vec![subtype.skip_binder().a,
+ subtype.skip_binder().b];
+ Ok(None)
+ }
+ Some(Ok(ok)) => {
+ Ok(Some(ok.obligations))
+ }
+ Some(Err(err)) => {
+ let expected_found = ExpectedFound::new(subtype.skip_binder().a_is_expected,
+ subtype.skip_binder().a,
+ subtype.skip_binder().b);
+ Err(FulfillmentErrorCode::CodeSubtypeError(expected_found, err))
+ }
+ }
+ }
}
}
use middle::free_region::FreeRegionMap;
use ty::subst::Substs;
use ty::{self, Ty, TyCtxt, TypeFoldable, ToPredicate};
-use infer::InferCtxt;
+use ty::error::{ExpectedFound, TypeError};
+use infer::{InferCtxt};
use std::rc::Rc;
use syntax::ast;
pub use self::object_safety::ObjectSafetyViolation;
pub use self::object_safety::MethodViolationCode;
pub use self::select::{EvaluationCache, SelectionContext, SelectionCache};
-pub use self::select::{MethodMatchResult, MethodMatched, MethodAmbiguous, MethodDidNotMatch};
-pub use self::select::{MethodMatchedData}; // intentionally don't export variants
pub use self::specialize::{OverlapError, specialization_graph, specializes, translate_substs};
pub use self::specialize::{SpecializesCache, find_associated_item};
pub use self::util::elaborate_predicates;
pub enum FulfillmentErrorCode<'tcx> {
CodeSelectionError(SelectionError<'tcx>),
CodeProjectionError(MismatchedProjectionTypes<'tcx>),
+ CodeSubtypeError(ExpectedFound<Ty<'tcx>>,
+ TypeError<'tcx>), // always comes from a SubtypePredicate
CodeAmbiguity,
}
ty::Predicate::TypeOutlives(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::ClosureKind(..) |
+ ty::Predicate::Subtype(..) |
ty::Predicate::Equate(..) => {
false
}
ty::Predicate::Projection(..) |
ty::Predicate::Trait(..) |
ty::Predicate::Equate(..) |
+ ty::Predicate::Subtype(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::WellFormed(..) |
ty::Predicate::ObjectSafe(..) |
//! See `README.md` for high-level documentation
-pub use self::MethodMatchResult::*;
-pub use self::MethodMatchedData::*;
use self::SelectionCandidate::*;
use self::EvaluationResult::*;
SelectionResult<'tcx, SelectionCandidate<'tcx>>>>,
}
-pub enum MethodMatchResult {
- MethodMatched(MethodMatchedData),
- MethodAmbiguous(/* list of impls that could apply */ Vec<DefId>),
- MethodDidNotMatch,
-}
-
-#[derive(Copy, Clone, Debug)]
-pub enum MethodMatchedData {
- // In the case of a precise match, we don't really need to store
- // how the match was found. So don't.
- PreciseMethodMatch,
-
- // In the case of a coercion, we need to know the precise impl so
- // that we can determine the type to which things were coerced.
- CoerciveMethodMatch(/* impl we matched */ DefId)
-}
-
/// The selection process begins by considering all impls, where
/// clauses, and so forth that might resolve an obligation. Sometimes
/// we'll be able to say definitively that (e.g.) an impl does not
}
}
+ ty::Predicate::Subtype(ref p) => {
+ // does this code ever run?
+ match self.infcx.subtype_predicate(&obligation.cause, p) {
+ Some(Ok(InferOk { obligations, .. })) => {
+ self.inferred_obligations.extend(obligations);
+ EvaluatedToOk
+ },
+ Some(Err(_)) => EvaluatedToErr,
+ None => EvaluatedToAmbig,
+ }
+ }
+
ty::Predicate::WellFormed(ty) => {
match ty::wf::obligations(self.infcx, obligation.cause.body_id,
ty, obligation.cause.span) {
if other.evaluation == EvaluatedToOk {
if let ImplCandidate(victim_def) = victim.candidate {
let tcx = self.tcx().global_tcx();
- return traits::specializes(tcx, other_def, victim_def);
+ return traits::specializes(tcx, other_def, victim_def) ||
+ tcx.impls_are_allowed_to_overlap(other_def, victim_def);
}
}
}
}
}
-
-impl MethodMatchResult {
- pub fn may_apply(&self) -> bool {
- match *self {
- MethodMatched(_) => true,
- MethodAmbiguous(_) => true,
- MethodDidNotMatch => false,
- }
- }
-}
possible_sibling,
impl_def_id);
if let Some(impl_header) = overlap {
+ if tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling) {
+ return Ok((false, false));
+ }
+
let le = specializes(tcx, impl_def_id, possible_sibling);
let ge = specializes(tcx, possible_sibling, impl_def_id);
match *self {
super::CodeSelectionError(ref e) => write!(f, "{:?}", e),
super::CodeProjectionError(ref e) => write!(f, "{:?}", e),
+ super::CodeSubtypeError(ref a, ref b) =>
+ write!(f, "CodeSubtypeError({:?}, {:?})", a, b),
super::CodeAmbiguity => write!(f, "Ambiguity")
}
}
ty::Predicate::ObjectSafe(data),
ty::Predicate::ClosureKind(closure_def_id, kind) =>
- ty::Predicate::ClosureKind(closure_def_id, kind)
+ ty::Predicate::ClosureKind(closure_def_id, kind),
+
+ ty::Predicate::Subtype(ref data) =>
+ ty::Predicate::Subtype(tcx.anonymize_late_bound_regions(data)),
}
}
// `X == Y`, though conceivably we might. For example,
// `&X == &Y` implies that `X == Y`.
}
+ ty::Predicate::Subtype(..) => {
+ // Currently, we do not "elaborate" predicates like `X
+ // <: Y`, though conceivably we might.
+ }
ty::Predicate::Projection(..) => {
// Nothing to elaborate in a projection predicate.
}
/// Go from a safe fn pointer to an unsafe fn pointer.
UnsafeFnPointer,
- // Go from a non-capturing closure to an fn pointer.
+ /// Go from a non-capturing closure to an fn pointer.
ClosureFnPointer,
/// Go from a mut raw pointer to a const raw pointer.
ast_ty_to_ty_cache: RefCell::new(NodeMap()),
}, f)
}
+
+ pub fn consider_optimizing<T: Fn() -> String>(&self, msg: T) -> bool {
+ let cname = self.crate_name(LOCAL_CRATE).as_str();
+ self.sess.consider_optimizing(&cname, msg)
+ }
}
impl<'gcx: 'tcx, 'tcx> GlobalCtxt<'gcx> {
use infer::InferCtxt;
use session::Session;
use traits;
-use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
+use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions, ReprFlags};
use syntax::ast::{FloatTy, IntTy, UintTy};
use syntax::attr;
return (discr, ity.is_signed());
}
- if repr.c {
+ if repr.c() {
match &tcx.sess.target.target.arch[..] {
// WARNING: the ARM EABI has two variants; the one corresponding
// to `at_least == I32` appears to be used on Linux and NetBSD,
}
impl<'a, 'gcx, 'tcx> Struct {
- // FIXME(camlorn): reprs need a better representation to deal with multiple reprs on one type.
fn new(dl: &TargetDataLayout, fields: &Vec<&'a Layout>,
repr: &ReprOptions, kind: StructKind,
scapegoat: Ty<'gcx>) -> Result<Struct, LayoutError<'gcx>> {
- let packed = repr.packed;
+ let packed = repr.packed();
let mut ret = Struct {
align: if packed { dl.i8_align } else { dl.aggregate_align },
packed: packed,
// Neither do 1-member and 2-member structs.
// In addition, code in trans assume that 2-element structs can become pairs.
// It's easier to just short-circuit here.
- let mut can_optimize = (fields.len() > 2 || StructKind::EnumVariant == kind)
- && ! (repr.c || repr.packed);
-
- // Disable field reordering until we can decide what to do.
- // The odd pattern here avoids a warning about the value never being read.
- if can_optimize { can_optimize = false; }
+ let can_optimize = (fields.len() > 2 || StructKind::EnumVariant == kind)
+ && (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty();
let (optimize, sort_ascending) = match kind {
StructKind::AlwaysSizedUnivariant => (can_optimize, false),
}
// SIMD vector types.
- ty::TyAdt(def, ..) if def.repr.simd => {
+ ty::TyAdt(def, ..) if def.repr.simd() => {
let element = ty.simd_type(tcx);
match *element.layout(infcx)? {
Scalar { value, .. } => {
field.ty(tcx, substs).layout(infcx)
}).collect::<Result<Vec<_>, _>>()?;
let layout = if def.is_union() {
- let mut un = Union::new(dl, def.repr.packed);
+ let mut un = Union::new(dl, def.repr.packed());
un.extend(dl, fields.iter().map(|&f| Ok(f)), ty)?;
UntaggedUnion { variants: un }
} else {
ty::TyTuple(tys, _) => tys[i],
// SIMD vector types.
- ty::TyAdt(def, ..) if def.repr.simd => {
+ ty::TyAdt(def, ..) if def.repr.simd() => {
self.ty.simd_type(tcx)
}
pub coherent_trait: coherent_trait_dep_node((CrateNum, DefId)) -> (),
+ pub borrowck: BorrowCheck(DefId) -> (),
+
/// Gets a complete map from all types to their inherent impls.
/// Not meant to be used directly outside of coherence.
/// (Defined only for LOCAL_CRATE)
}
}
+// Stable hashing for interned types: only `sty` is fed to the hasher.
+// The destructuring pattern is deliberately exhaustive so that adding a
+// field to `TyS` forces a decision here about whether it must be hashed.
+impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::TyS<'tcx> {
+ fn hash_stable<W: StableHasherResult>(&self,
+ hcx: &mut StableHashingContext<'a, 'tcx>,
+ hasher: &mut StableHasher<W>) {
+ let ty::TyS {
+ ref sty,
+
+ // The other fields just provide fast access to information that is
+ // also contained in `sty`, so no need to hash them.
+ flags: _,
+ region_depth: _,
+ } = *self;
+
+ sty.hash_stable(hcx, hasher);
+ }
+}
+
pub type Ty<'tcx> = &'tcx TyS<'tcx>;
impl<'tcx> serialize::UseSpecializedEncodable for Ty<'tcx> {}
/// for some substitutions `...` and T being a closure type.
/// Satisfied (or refuted) once we know the closure's kind.
ClosureKind(DefId, ClosureKind),
+
+ /// `T1 <: T2`
+ Subtype(PolySubtypePredicate<'tcx>),
}
impl<'a, 'gcx, 'tcx> Predicate<'tcx> {
Predicate::Trait(ty::Binder(data.subst(tcx, substs))),
Predicate::Equate(ty::Binder(ref data)) =>
Predicate::Equate(ty::Binder(data.subst(tcx, substs))),
+ Predicate::Subtype(ty::Binder(ref data)) =>
+ Predicate::Subtype(ty::Binder(data.subst(tcx, substs))),
Predicate::RegionOutlives(ty::Binder(ref data)) =>
Predicate::RegionOutlives(ty::Binder(data.subst(tcx, substs))),
Predicate::TypeOutlives(ty::Binder(ref data)) =>
&'tcx ty::Region>;
pub type PolyTypeOutlivesPredicate<'tcx> = PolyOutlivesPredicate<Ty<'tcx>, &'tcx ty::Region>;
+/// `a <: b` — asserts that type `a` is a subtype of type `b` (see
+/// `Predicate::Subtype`). `a_is_expected` records which of the two
+/// sides should be treated as the "expected" type when they are
+/// related.
+#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
+pub struct SubtypePredicate<'tcx> {
+ pub a_is_expected: bool,
+ pub a: Ty<'tcx>,
+ pub b: Ty<'tcx>
+}
+/// A `SubtypePredicate` wrapped in a binder for late-bound regions.
+pub type PolySubtypePredicate<'tcx> = ty::Binder<SubtypePredicate<'tcx>>;
+
/// This kind of predicate has no *direct* correspondent in the
/// syntax, but it roughly corresponds to the syntactic forms:
///
ty::Predicate::Equate(ty::Binder(ref data)) => {
vec![data.0, data.1]
}
+ ty::Predicate::Subtype(ty::Binder(SubtypePredicate { a, b, a_is_expected: _ })) => {
+ vec![a, b]
+ }
ty::Predicate::TypeOutlives(ty::Binder(ref data)) => {
vec![data.0]
}
}
Predicate::Projection(..) |
Predicate::Equate(..) |
+ Predicate::Subtype(..) |
Predicate::RegionOutlives(..) |
Predicate::WellFormed(..) |
Predicate::ObjectSafe(..) |
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum AdtKind { Struct, Union, Enum }
+// Bitflag encoding of the `#[repr(..)]` attributes found on an ADT
+// (populated in `ReprOptions::new`), plus compiler-internal flags such
+// as `IS_LINEAR`.
+bitflags! {
+ #[derive(RustcEncodable, RustcDecodable, Default)]
+ flags ReprFlags: u8 {
+ const IS_C = 1 << 0,
+ const IS_PACKED = 1 << 1,
+ const IS_SIMD = 1 << 2,
+ // Internal only for now. If true, don't reorder fields.
+ const IS_LINEAR = 1 << 3,
+
+ // Any of these flags being set prevent field reordering optimisation.
+ const IS_UNOPTIMISABLE = ReprFlags::IS_C.bits |
+ ReprFlags::IS_PACKED.bits |
+ ReprFlags::IS_SIMD.bits |
+ ReprFlags::IS_LINEAR.bits,
+ }
+}
+
+impl_stable_hash_for!(struct ReprFlags {
+ bits
+});
+
+
+
/// Represents the repr options provided by the user,
#[derive(Copy, Clone, Eq, PartialEq, RustcEncodable, RustcDecodable, Default)]
pub struct ReprOptions {
- pub c: bool,
- pub packed: bool,
- pub simd: bool,
pub int: Option<attr::IntType>,
+ pub flags: ReprFlags,
}
impl_stable_hash_for!(struct ReprOptions {
- c,
- packed,
- simd,
- int
+ int,
+ flags
});
impl ReprOptions {
pub fn new(tcx: TyCtxt, did: DefId) -> ReprOptions {
- let mut ret = ReprOptions::default();
+ let mut flags = ReprFlags::empty();
+ let mut size = None;
for attr in tcx.get_attrs(did).iter() {
for r in attr::find_repr_attrs(tcx.sess.diagnostic(), attr) {
- match r {
- attr::ReprExtern => ret.c = true,
- attr::ReprPacked => ret.packed = true,
- attr::ReprSimd => ret.simd = true,
- attr::ReprInt(i) => ret.int = Some(i),
- }
+ flags.insert(match r {
+ attr::ReprExtern => ReprFlags::IS_C,
+ attr::ReprPacked => ReprFlags::IS_PACKED,
+ attr::ReprSimd => ReprFlags::IS_SIMD,
+ attr::ReprInt(i) => {
+ size = Some(i);
+ ReprFlags::empty()
+ },
+ });
}
}
// FIXME(eddyb) This is deprecated and should be removed.
if tcx.has_attr(did, "simd") {
- ret.simd = true;
+ flags.insert(ReprFlags::IS_SIMD);
}
- ret
+ // This is here instead of layout because the choice must make it into metadata.
+ if !tcx.consider_optimizing(|| format!("Reorder fields of {:?}", tcx.item_path_str(did))) {
+ flags.insert(ReprFlags::IS_LINEAR);
+ }
+ ReprOptions { int: size, flags: flags }
}
+ #[inline]
+ pub fn simd(&self) -> bool { self.flags.contains(ReprFlags::IS_SIMD) }
+ #[inline]
+ pub fn c(&self) -> bool { self.flags.contains(ReprFlags::IS_C) }
+ #[inline]
+ pub fn packed(&self) -> bool { self.flags.contains(ReprFlags::IS_PACKED) }
+ #[inline]
+ pub fn linear(&self) -> bool { self.flags.contains(ReprFlags::IS_LINEAR) }
+
pub fn discr_type(&self) -> attr::IntType {
self.int.unwrap_or(attr::SignedInt(ast::IntTy::Is))
}
/// layout" optimizations, such as representing `Foo<&T>` as a
/// single pointer.
pub fn inhibit_enum_layout_opt(&self) -> bool {
- self.c || self.int.is_some()
+ self.c() || self.int.is_some()
}
}
/// Due to normalization being eager, this applies even if
/// the associated type is behind a pointer, e.g. issue #31299.
pub fn sized_constraint(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> {
- self.calculate_sized_constraint_inner(tcx.global_tcx(), &mut Vec::new())
- }
-
- /// Calculates the Sized-constraint.
- ///
- /// As the Sized-constraint of enums can be a *set* of types,
- /// the Sized-constraint may need to be a set also. Because introducing
- /// a new type of IVar is currently a complex affair, the Sized-constraint
- /// may be a tuple.
- ///
- /// In fact, there are only a few options for the constraint:
- /// - `bool`, if the type is always Sized
- /// - an obviously-unsized type
- /// - a type parameter or projection whose Sizedness can't be known
- /// - a tuple of type parameters or projections, if there are multiple
- /// such.
- /// - a TyError, if a type contained itself. The representability
- /// check should catch this case.
- fn calculate_sized_constraint_inner(&self,
- tcx: TyCtxt<'a, 'tcx, 'tcx>,
- stack: &mut Vec<DefId>)
- -> Ty<'tcx>
- {
- if let Some(ty) = tcx.maps.adt_sized_constraint.borrow().get(&self.did) {
- return ty;
- }
-
- // Follow the memoization pattern: push the computation of
- // DepNode::SizedConstraint as our current task.
- let _task = tcx.dep_graph.in_task(DepNode::SizedConstraint(self.did));
-
- if stack.contains(&self.did) {
- debug!("calculate_sized_constraint: {:?} is recursive", self);
- // This should be reported as an error by `check_representable`.
- //
- // Consider the type as Sized in the meanwhile to avoid
- // further errors.
- tcx.maps.adt_sized_constraint.borrow_mut().insert(self.did, tcx.types.err);
- return tcx.types.err;
- }
-
- stack.push(self.did);
-
- let tys : Vec<_> =
- self.variants.iter().flat_map(|v| {
- v.fields.last()
- }).flat_map(|f| {
- let ty = tcx.item_type(f.did);
- self.sized_constraint_for_ty(tcx, stack, ty)
- }).collect();
-
- let self_ = stack.pop().unwrap();
- assert_eq!(self_, self.did);
-
- let ty = match tys.len() {
- _ if tys.references_error() => tcx.types.err,
- 0 => tcx.types.bool,
- 1 => tys[0],
- _ => tcx.intern_tup(&tys[..], false)
- };
-
- let old = tcx.maps.adt_sized_constraint.borrow().get(&self.did).cloned();
- match old {
- Some(old_ty) => {
- debug!("calculate_sized_constraint: {:?} recurred", self);
- assert_eq!(old_ty, tcx.types.err);
- old_ty
- }
- None => {
- debug!("calculate_sized_constraint: {:?} => {:?}", self, ty);
- tcx.maps.adt_sized_constraint.borrow_mut().insert(self.did, ty);
- ty
+ match queries::adt_sized_constraint::try_get(tcx, DUMMY_SP, self.did) {
+ Ok(ty) => ty,
+ Err(_) => {
+ debug!("adt_sized_constraint: {:?} is recursive", self);
+ // This should be reported as an error by `check_representable`.
+ //
+ // Consider the type as Sized in the meanwhile to avoid
+ // further errors.
+ tcx.types.err
}
}
}
fn sized_constraint_for_ty(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- stack: &mut Vec<DefId>,
ty: Ty<'tcx>)
-> Vec<Ty<'tcx>> {
let result = match ty.sty {
TyTuple(ref tys, _) => {
match tys.last() {
None => vec![],
- Some(ty) => self.sized_constraint_for_ty(tcx, stack, ty)
+ Some(ty) => self.sized_constraint_for_ty(tcx, ty)
}
}
TyAdt(adt, substs) => {
// recursive case
let adt_ty =
- adt.calculate_sized_constraint_inner(tcx, stack)
+ adt.sized_constraint(tcx)
.subst(tcx, substs);
debug!("sized_constraint_for_ty({:?}) intermediate = {:?}",
ty, adt_ty);
if let ty::TyTuple(ref tys, _) = adt_ty.sty {
tys.iter().flat_map(|ty| {
- self.sized_constraint_for_ty(tcx, stack, ty)
+ self.sized_constraint_for_ty(tcx, ty)
}).collect()
} else {
- self.sized_constraint_for_ty(tcx, stack, adt_ty)
+ self.sized_constraint_for_ty(tcx, adt_ty)
}
}
queries::impl_trait_ref::get(self, DUMMY_SP, id)
}
+ /// Returns true if the two given impls are allowed to overlap:
+ /// both must have the same polarity and implement a trait that has
+ /// no associated items (a "marker trait"). Always returns false
+ /// unless the `overlapping_marker_traits` feature gate is enabled.
+ pub fn impls_are_allowed_to_overlap(self, def_id1: DefId, def_id2: DefId) -> bool {
+ if !self.sess.features.borrow().overlapping_marker_traits {
+ return false;
+ }
+ // Inherent impls have no trait ref, so `map_or(false, ..)` makes
+ // them never eligible for overlap.
+ let trait1_is_empty = self.impl_trait_ref(def_id1)
+ .map_or(false, |trait_ref| {
+ self.associated_item_def_ids(trait_ref.def_id).is_empty()
+ });
+ let trait2_is_empty = self.impl_trait_ref(def_id2)
+ .map_or(false, |trait_ref| {
+ self.associated_item_def_ids(trait_ref.def_id).is_empty()
+ });
+ self.trait_impl_polarity(def_id1) == self.trait_impl_polarity(def_id2)
+ && trait1_is_empty
+ && trait2_is_empty
+ }
+
// Returns `ty::VariantDef` if `def` refers to a struct,
// or variant or their constructors, panics otherwise.
pub fn expect_variant_def(self, def: Def) -> &'tcx VariantDef {
/// `DefId` is really just an interned def-path).
///
/// Note that if `id` is not local to this crate, the result will
- // be a non-local `DefPath`.
+ /// be a non-local `DefPath`.
pub fn def_path(self, id: DefId) -> hir_map::DefPath {
if id.is_local() {
self.hir.def_path(id)
panic!("associated item not found for def_id: {:?}", def_id);
}
+/// Calculates the Sized-constraint.
+///
+/// As the Sized-constraint of enums can be a *set* of types,
+/// the Sized-constraint may need to be a set also. Because introducing
+/// a new type of IVar is currently a complex affair, the Sized-constraint
+/// may be a tuple.
+///
+/// In fact, there are only a few options for the constraint:
+/// - `bool`, if the type is always Sized
+/// - an obviously-unsized type
+/// - a type parameter or projection whose Sizedness can't be known
+/// - a tuple of type parameters or projections, if there are multiple
+/// such.
+/// - a TyError, if a type contained itself. The representability
+/// check should catch this case.
+fn adt_sized_constraint<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId)
+ -> Ty<'tcx> {
+ // NOTE: for recursive ADTs this query cycles; the caller
+ // (`sized_constraint`) maps the resulting cycle error to
+ // `tcx.types.err`.
+ let def = tcx.lookup_adt_def(def_id);
+
+ // Only the last field of a variant may be unsized (all earlier
+ // fields must be `Sized`), so the last field alone determines each
+ // variant's contribution to the constraint.
+ let tys: Vec<_> = def.variants.iter().flat_map(|v| {
+ v.fields.last()
+ }).flat_map(|f| {
+ let ty = tcx.item_type(f.did);
+ def.sized_constraint_for_ty(tcx, ty)
+ }).collect();
+
+ let ty = match tys.len() {
+ _ if tys.references_error() => tcx.types.err,
+ 0 => tcx.types.bool,
+ 1 => tys[0],
+ _ => tcx.intern_tup(&tys[..], false)
+ };
+
+ debug!("adt_sized_constraint: {:?} => {:?}", def, ty);
+
+ ty
+}
+
pub fn provide(providers: &mut ty::maps::Providers) {
*providers = ty::maps::Providers {
associated_item,
+ adt_sized_constraint,
+ ..*providers
+ };
+}
+
+pub fn provide_extern(providers: &mut ty::maps::Providers) {
+ *providers = ty::maps::Providers {
+ adt_sized_constraint,
..*providers
};
}
}
}
+// Lifting a `SubtypePredicate` into another tcx: only the two types
+// need lifting; `a_is_expected` is a plain bool and is copied through.
+impl<'a, 'tcx> Lift<'tcx> for ty::SubtypePredicate<'a> {
+ type Lifted = ty::SubtypePredicate<'tcx>;
+ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>)
+ -> Option<ty::SubtypePredicate<'tcx>> {
+ tcx.lift(&(self.a, self.b)).map(|(a, b)| ty::SubtypePredicate {
+ a_is_expected: self.a_is_expected,
+ a: a,
+ b: b,
+ })
+ }
+}
+
impl<'tcx, A: Copy+Lift<'tcx>, B: Copy+Lift<'tcx>> Lift<'tcx> for ty::OutlivesPredicate<A, B> {
type Lifted = ty::OutlivesPredicate<A::Lifted, B::Lifted>;
fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option<Self::Lifted> {
ty::Predicate::Equate(ref binder) => {
tcx.lift(binder).map(ty::Predicate::Equate)
}
+ ty::Predicate::Subtype(ref binder) => {
+ tcx.lift(binder).map(ty::Predicate::Subtype)
+ }
ty::Predicate::RegionOutlives(ref binder) => {
tcx.lift(binder).map(ty::Predicate::RegionOutlives)
}
ty::Predicate::Trait(a.fold_with(folder)),
ty::Predicate::Equate(ref binder) =>
ty::Predicate::Equate(binder.fold_with(folder)),
+ ty::Predicate::Subtype(ref binder) =>
+ ty::Predicate::Subtype(binder.fold_with(folder)),
ty::Predicate::RegionOutlives(ref binder) =>
ty::Predicate::RegionOutlives(binder.fold_with(folder)),
ty::Predicate::TypeOutlives(ref binder) =>
match *self {
ty::Predicate::Trait(ref a) => a.visit_with(visitor),
ty::Predicate::Equate(ref binder) => binder.visit_with(visitor),
+ ty::Predicate::Subtype(ref binder) => binder.visit_with(visitor),
ty::Predicate::RegionOutlives(ref binder) => binder.visit_with(visitor),
ty::Predicate::TypeOutlives(ref binder) => binder.visit_with(visitor),
ty::Predicate::Projection(ref binder) => binder.visit_with(visitor),
impl<'tcx> TypeFoldable<'tcx> for ty::EquatePredicate<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
- ty::EquatePredicate(self.0.fold_with(folder),
- self.1.fold_with(folder))
+ ty::EquatePredicate(self.0.fold_with(folder), self.1.fold_with(folder))
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
}
}
+// Folding/visiting a `SubtypePredicate` touches only the `a` and `b`
+// types; `a_is_expected` carries no type information and is preserved
+// unchanged.
+impl<'tcx> TypeFoldable<'tcx> for ty::SubtypePredicate<'tcx> {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
+ ty::SubtypePredicate {
+ a_is_expected: self.a_is_expected,
+ a: self.a.fold_with(folder),
+ b: self.b.fold_with(folder)
+ }
+ }
+
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+ self.a.visit_with(visitor) || self.b.visit_with(visitor)
+ }
+}
+
impl<'tcx> TypeFoldable<'tcx> for ty::TraitPredicate<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::TraitPredicate {
#[inline]
pub fn is_simd(&self) -> bool {
match self.sty {
- TyAdt(def, _) => def.repr.simd,
+ TyAdt(def, _) => def.repr.simd(),
_ => false,
}
}
use hir::def_id::{DefId, LOCAL_CRATE};
use hir::map::DefPathData;
use infer::InferCtxt;
-// use hir::map as hir_map;
+use ich::{StableHashingContext, NodeIdHashingMode};
use traits::{self, Reveal};
use ty::{self, Ty, TyCtxt, TypeAndMut, TypeFlags, TypeFoldable};
use ty::ParameterEnvironment;
use middle::lang_items;
use rustc_const_math::{ConstInt, ConstIsize, ConstUsize};
-use rustc_data_structures::stable_hasher::{StableHasher, StableHasherResult};
-
+use rustc_data_structures::stable_hasher::{StableHasher, StableHasherResult,
+ HashStable};
use std::cell::RefCell;
use std::cmp;
use std::hash::Hash;
}
}
+impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> {
+ /// Creates a hash of the type `Ty` which will be the same no matter what crate
+ /// context it's calculated within. This is used by the `type_id` intrinsic.
+ pub fn type_id_hash(self, ty: Ty<'tcx>) -> u64 {
+ let mut hasher = StableHasher::new();
+ let mut hcx = StableHashingContext::new(self);
+
+ // Spans and raw NodeIds are not stable across compilation
+ // sessions/crates, so disable span hashing and hash node ids by
+ // their def-path instead.
+ hcx.while_hashing_spans(false, |hcx| {
+ hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
+ ty.hash_stable(hcx, &mut hasher);
+ });
+ });
+ hasher.finish()
+ }
+}
+
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
pub fn has_error_field(self, ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::Predicate::Projection(..) |
ty::Predicate::Trait(..) |
ty::Predicate::Equate(..) |
+ ty::Predicate::Subtype(..) |
ty::Predicate::WellFormed(..) |
ty::Predicate::ObjectSafe(..) |
ty::Predicate::ClosureKind(..) |
.collect()
}
- /// Creates a hash of the type `Ty` which will be the same no matter what crate
- /// context it's calculated within. This is used by the `type_id` intrinsic.
- pub fn type_id_hash(self, ty: Ty<'tcx>) -> u64 {
- let mut hasher = TypeIdHasher::new(self);
- hasher.visit_ty(ty);
- hasher.finish()
- }
-
/// Calculate the destructor of a given type.
pub fn calculate_dtor(
self,
}
ty::Predicate::ClosureKind(..) => {
}
+ ty::Predicate::Subtype(ref data) => {
+ wf.compute(data.skip_binder().a); // (*)
+ wf.compute(data.skip_binder().b); // (*)
+ }
}
wf.normalize()
match obligation.predicate {
ty::Predicate::Trait(..) |
ty::Predicate::Equate(..) |
+ ty::Predicate::Subtype(..) |
ty::Predicate::Projection(..) |
ty::Predicate::ClosureKind(..) |
ty::Predicate::ObjectSafe(..) =>
match *self {
ty::Predicate::Trait(ref a) => write!(f, "{:?}", a),
ty::Predicate::Equate(ref pair) => write!(f, "{:?}", pair),
+ ty::Predicate::Subtype(ref pair) => write!(f, "{:?}", pair),
ty::Predicate::RegionOutlives(ref pair) => write!(f, "{:?}", pair),
ty::Predicate::TypeOutlives(ref pair) => write!(f, "{:?}", pair),
ty::Predicate::Projection(ref pair) => write!(f, "{:?}", pair),
}
}
+// Matches the neighbouring `Binder<...>` Display impls: lift into the
+// thread-local tcx and print via `in_binder`.
+impl<'tcx> fmt::Display for ty::Binder<ty::SubtypePredicate<'tcx>> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ ty::tls::with(|tcx| in_binder(f, tcx, self, tcx.lift(self)))
+ }
+}
+
impl<'tcx> fmt::Display for ty::Binder<ty::ProjectionPredicate<'tcx>> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
ty::tls::with(|tcx| in_binder(f, tcx, self, tcx.lift(self)))
}
}
+// Renders the predicate in the surface notation `a <: b`.
+impl<'tcx> fmt::Display for ty::SubtypePredicate<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{} <: {}", self.a, self.b)
+ }
+}
+
impl<'tcx> fmt::Debug for ty::TraitPredicate<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "TraitPredicate({:?})",
match *self {
ty::Predicate::Trait(ref data) => write!(f, "{}", data),
ty::Predicate::Equate(ref predicate) => write!(f, "{}", predicate),
+ ty::Predicate::Subtype(ref predicate) => write!(f, "{}", predicate),
ty::Predicate::RegionOutlives(ref predicate) => write!(f, "{}", predicate),
ty::Predicate::TypeOutlives(ref predicate) => write!(f, "{}", predicate),
ty::Predicate::Projection(ref predicate) => write!(f, "{}", predicate),
pub fn target() -> Result<Target, String> {
let mut post_link_args = LinkArgs::new();
- post_link_args.insert(LinkerFlavor::Gcc,
+ post_link_args.insert(LinkerFlavor::Em,
vec!["-s".to_string(),
"BINARYEN=1".to_string(),
"-s".to_string(),
data
}
- fn create_drop_flag(&mut self, index: MovePathIndex) {
+ fn create_drop_flag(&mut self, index: MovePathIndex, span: Span) {
let tcx = self.tcx;
let patch = &mut self.patch;
debug!("create_drop_flag({:?})", self.mir.span);
self.drop_flags.entry(index).or_insert_with(|| {
- patch.new_temp(tcx.types.bool)
+ patch.new_temp(tcx.types.bool, span)
});
}
debug!("collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}",
child, location, path, (maybe_live, maybe_dead));
if maybe_live && maybe_dead {
- self.create_drop_flag(child)
+ self.create_drop_flag(child, terminator.source_info.span)
}
});
}
use self::InteriorKind::*;
-use rustc::dep_graph::DepNode;
use rustc::hir::map as hir_map;
use rustc::hir::map::blocks::FnLikeNode;
use rustc::cfg;
use rustc::middle::mem_categorization::ImmutabilityBlame;
use rustc::middle::region;
use rustc::ty::{self, TyCtxt};
+use rustc::ty::maps::Providers;
use std::fmt;
use std::rc::Rc;
use std::hash::{Hash, Hasher};
use syntax::ast;
-use syntax_pos::{MultiSpan, Span};
+use syntax_pos::{DUMMY_SP, MultiSpan, Span};
use errors::DiagnosticBuilder;
use rustc::hir;
pub type LoanDataFlow<'a, 'tcx> = DataFlowContext<'a, 'tcx, LoanDataFlowOperator>;
pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
- tcx.dep_graph.with_task(DepNode::BorrowCheckKrate, tcx, (), check_crate_task);
-
- fn check_crate_task<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, (): ()) {
- tcx.visit_all_bodies_in_krate(|body_owner_def_id, body_id| {
- tcx.dep_graph.with_task(DepNode::BorrowCheck(body_owner_def_id),
- tcx,
- body_id,
- borrowck_fn);
- });
- }
+ tcx.visit_all_bodies_in_krate(|body_owner_def_id, _body_id| {
+ ty::queries::borrowck::get(tcx, DUMMY_SP, body_owner_def_id);
+ });
+}
+
+/// Registers this crate's query providers (currently just `borrowck`)
+/// in the global provider table; called from the driver when the
+/// provider tables are assembled.
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers {
+ borrowck,
+ ..*providers
+ };
+}
/// Collection of conclusions determined via borrow checker analyses.
pub move_data: move_data::FlowedMoveData<'a, 'tcx>,
}
-fn borrowck_fn<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, body_id: hir::BodyId) {
- debug!("borrowck_fn(body_id={:?})", body_id);
+fn borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, owner_def_id: DefId) {
+ debug!("borrowck(body_owner_def_id={:?})", owner_def_id);
- let owner_id = tcx.hir.body_owner(body_id);
- let owner_def_id = tcx.hir.local_def_id(owner_id);
+ let owner_id = tcx.hir.as_local_node_id(owner_def_id).unwrap();
+ let body_id = tcx.hir.body_owned_by(owner_id);
let attributes = tcx.get_attrs(owner_def_id);
let tables = tcx.item_tables(owner_def_id);
/// Adds a new move entry for a move of `lp` that occurs at location `id` with kind `kind`.
pub fn add_move(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
- lp: Rc<LoanPath<'tcx>>,
+ orig_lp: Rc<LoanPath<'tcx>>,
id: ast::NodeId,
kind: MoveKind) {
- // Moving one union field automatically moves all its fields.
- if let LpExtend(ref base_lp, mutbl, LpInterior(opt_variant_id, interior)) = lp.kind {
- if let ty::TyAdt(adt_def, _) = base_lp.ty.sty {
+ // Moving one union field automatically moves all its fields. Also move siblings of
+ // all parent union fields, moves do not propagate upwards automatically.
+ let mut lp = orig_lp.clone();
+ while let LpExtend(ref base_lp, mutbl, lp_elem) = lp.clone().kind {
+ if let (&ty::TyAdt(adt_def, _), LpInterior(opt_variant_id, interior))
+ = (&base_lp.ty.sty, lp_elem) {
if adt_def.is_union() {
for field in &adt_def.struct_variant().fields {
let field = InteriorKind::InteriorField(mc::NamedField(field.name));
- let field_ty = if field == interior {
- lp.ty
- } else {
- tcx.types.err // Doesn't matter
- };
- let sibling_lp_kind = LpExtend(base_lp.clone(), mutbl,
- LpInterior(opt_variant_id, field));
- let sibling_lp = Rc::new(LoanPath::new(sibling_lp_kind, field_ty));
- self.add_move_helper(tcx, sibling_lp, id, kind);
+ if field != interior {
+ let sibling_lp_kind =
+ LpExtend(base_lp.clone(), mutbl, LpInterior(opt_variant_id, field));
+ let sibling_lp = Rc::new(LoanPath::new(sibling_lp_kind, tcx.types.err));
+ self.add_move_helper(tcx, sibling_lp, id, kind);
+ }
}
- return;
}
}
+ lp = base_lp.clone();
}
- self.add_move_helper(tcx, lp.clone(), id, kind);
+ self.add_move_helper(tcx, orig_lp.clone(), id, kind);
}
fn add_move_helper(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
pub mod graphviz;
+pub use borrowck::provide;
+
__build_diagnostic_array! { librustc_borrowck, DIAGNOSTICS }
use std::mem;
use std::collections::range::RangeArgument;
use std::collections::Bound::{Excluded, Included, Unbounded};
+use std::mem::ManuallyDrop;
pub unsafe trait Array {
type Element;
- type PartialStorage: Default + Unsize<[ManuallyDrop<Self::Element>]>;
+ type PartialStorage: Unsize<[ManuallyDrop<Self::Element>]>;
const LEN: usize;
}
const LEN: usize = 8;
}
+// 32-slot backing storage for `ArrayVec`, alongside the existing
+// 8-slot impl. NOTE(review): the trait is `unsafe`, presumably because
+// users rely on `LEN` matching `PartialStorage`'s real length — confirm
+// at the `Array` trait definition.
+unsafe impl<T> Array for [T; 32] {
+ type Element = T;
+ type PartialStorage = [ManuallyDrop<T>; 32];
+ const LEN: usize = 32;
+}
+
pub struct ArrayVec<A: Array> {
count: usize,
values: A::PartialStorage
pub fn new() -> Self {
ArrayVec {
count: 0,
- values: Default::default(),
+ // SAFETY(review): the slots are `ManuallyDrop`, and only indices
+ // below `count` are ever read or dropped, so the uninitialized
+ // bytes should never be observed — still, `mem::uninitialized`
+ // is UB-prone for arbitrary `T`; TODO confirm / migrate to
+ // `MaybeUninit` when available.
+ values: unsafe { ::std::mem::uninitialized() },
}
}
/// Panics when the stack vector is full.
pub fn push(&mut self, el: A::Element) {
let arr = &mut self.values as &mut [ManuallyDrop<_>];
- arr[self.count] = ManuallyDrop { value: el };
+ arr[self.count] = ManuallyDrop::new(el);
self.count += 1;
}
let arr = &mut self.values as &mut [ManuallyDrop<_>];
self.count -= 1;
unsafe {
- let value = ptr::read(&arr[self.count]);
- Some(value.value)
+ let value = ptr::read(&*arr[self.count]);
+ Some(value)
}
} else {
None
fn next(&mut self) -> Option<A::Element> {
let arr = &self.store as &[ManuallyDrop<_>];
unsafe {
- self.indices.next().map(|i| ptr::read(&arr[i]).value)
+ self.indices.next().map(|i| ptr::read(&*arr[i]))
}
}
#[inline]
fn next(&mut self) -> Option<A::Element> {
- self.iter.next().map(|elt| unsafe { ptr::read(elt as *const ManuallyDrop<_>).value })
+ self.iter.next().map(|elt| unsafe { ptr::read(&**elt) })
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter_mut()
}
}
-
-// FIXME: This should use repr(transparent) from rust-lang/rfcs#1758.
-#[allow(unions_with_drop_fields)]
-pub union ManuallyDrop<T> {
- value: T,
- #[allow(dead_code)]
- empty: (),
-}
-
-impl<T> ManuallyDrop<T> {
- fn new() -> ManuallyDrop<T> {
- ManuallyDrop {
- empty: ()
- }
- }
-}
-
-impl<T> Default for ManuallyDrop<T> {
- fn default() -> Self {
- ManuallyDrop::new()
- }
-}
t: [u64; 2],
c: usize,
outlen: u16,
- finalized: bool
+ finalized: bool,
+
+ #[cfg(debug_assertions)]
+ fnv_hash: u64,
}
+#[cfg(debug_assertions)]
impl ::std::fmt::Debug for Blake2bCtx {
- fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
- try!(write!(fmt, "hash: "));
- for v in &self.h {
- try!(write!(fmt, "{:x}", v));
- }
- Ok(())
+ fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+ write!(fmt, "{:x}", self.fnv_hash)
+ }
+}
+
+#[cfg(not(debug_assertions))]
+impl ::std::fmt::Debug for Blake2bCtx {
+ fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+ write!(fmt, "Enable debug_assertions() for more info.")
}
}
c: 0,
outlen: outlen as u16,
finalized: false,
+
+ #[cfg(debug_assertions)]
+ fnv_hash: 0xcbf29ce484222325,
};
ctx.h[0] ^= 0x01010000 ^ ((key.len() << 8) as u64) ^ (outlen as u64);
checked_mem_copy(data, &mut ctx.b[ctx.c .. ], bytes_to_copy);
ctx.c += bytes_to_copy;
}
+
+ #[cfg(debug_assertions)]
+ {
+ // compute additional FNV hash for simpler to read debug output
+ const MAGIC_PRIME: u64 = 0x00000100000001b3;
+
+ for &byte in data {
+ ctx.fnv_hash = (ctx.fnv_hash ^ byte as u64).wrapping_mul(MAGIC_PRIME);
+ }
+ }
}
fn blake2b_final(ctx: &mut Blake2bCtx)
#![feature(conservative_impl_trait)]
#![feature(discriminant_value)]
#![feature(specialization)]
+#![feature(manually_drop)]
+#![feature(struct_field_attributes)]
#![cfg_attr(unix, feature(libc))]
#![cfg_attr(test, feature(test))]
obligation: &mut Self::Obligation)
-> Result<Option<Vec<Self::Obligation>>, Self::Error>;
- fn process_backedge<'c, I>(&mut self, cycle: I,
+ /// As we do the cycle check, we invoke this callback when we
+ /// encounter an actual cycle. `cycle` is an iterator that starts
+ /// at the start of the cycle in the stack and walks **toward the
+ /// top**.
+ ///
+ /// In other words, if we had O1 which required O2 which required
+ /// O3 which required O1, we would give an iterator yielding O1,
+ /// O2, O3 (O1 is not yielded twice).
+ fn process_backedge<'c, I>(&mut self,
+ cycle: I,
_marker: PhantomData<&'c Self::Obligation>)
where I: Clone + Iterator<Item=&'c Self::Obligation>;
}
}
}
Entry::Vacant(v) => {
- debug!("register_obligation_at({:?}, {:?}) - ok",
- obligation, parent);
+ debug!("register_obligation_at({:?}, {:?}) - ok, new index is {}",
+ obligation, parent, self.nodes.len());
v.insert(NodeIndex::new(self.nodes.len()));
self.cache_list.push(obligation.as_predicate().clone());
self.nodes.push(Node::new(parent, obligation));
where P: ObligationProcessor<Obligation=O>
{
let mut stack = self.scratch.take().unwrap();
+ debug_assert!(stack.is_empty());
+
+ debug!("process_cycles()");
for index in 0..self.nodes.len() {
// For rustc-benchmarks/inflate-0.1.0 this state test is extremely
}
}
+ debug!("process_cycles: complete");
+
+ debug_assert!(stack.is_empty());
self.scratch = Some(stack);
}
NodeState::OnDfsStack => {
let index =
stack.iter().rposition(|n| *n == index).unwrap();
- // I need a Clone closure
- #[derive(Clone)]
- struct GetObligation<'a, O: 'a>(&'a [Node<O>]);
- impl<'a, 'b, O> FnOnce<(&'b usize,)> for GetObligation<'a, O> {
- type Output = &'a O;
- extern "rust-call" fn call_once(self, args: (&'b usize,)) -> &'a O {
- &self.0[*args.0].obligation
- }
- }
- impl<'a, 'b, O> FnMut<(&'b usize,)> for GetObligation<'a, O> {
- extern "rust-call" fn call_mut(&mut self, args: (&'b usize,)) -> &'a O {
- &self.0[*args.0].obligation
- }
- }
-
processor.process_backedge(stack[index..].iter().map(GetObligation(&self.nodes)),
PhantomData);
}
}
}
}
+
+// A cloneable "closure" mapping a stack index to its obligation.
+// The `Fn*` traits are implemented by hand because the iterator passed
+// to `process_backedge` must be `Clone`, and ordinary closures cannot
+// be cloned here.
+#[derive(Clone)]
+struct GetObligation<'a, O: 'a>(&'a [Node<O>]);
+
+impl<'a, 'b, O> FnOnce<(&'b usize,)> for GetObligation<'a, O> {
+ type Output = &'a O;
+ extern "rust-call" fn call_once(self, args: (&'b usize,)) -> &'a O {
+ &self.0[*args.0].obligation
+ }
+}
+
+impl<'a, 'b, O> FnMut<(&'b usize,)> for GetObligation<'a, O> {
+ extern "rust-call" fn call_mut(&mut self, args: (&'b usize,)) -> &'a O {
+ &self.0[*args.0].obligation
+ }
+}
/// This hasher currently always uses the stable Blake2b algorithm
/// and allows for variable output lengths through its type
/// parameter.
-#[derive(Debug)]
pub struct StableHasher<W> {
state: Blake2bHasher,
bytes_hashed: u64,
width: PhantomData<W>,
}
+// Manual `Debug` (replacing the former derive): shows only the
+// underlying Blake2b hash state. Presumably written by hand to avoid
+// the derive's `W: Debug` requirement — TODO confirm.
+impl<W: StableHasherResult> ::std::fmt::Debug for StableHasher<W> {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+ write!(f, "{:?}", self.state)
+ }
+}
+
pub trait StableHasherResult: Sized {
fn finish(hasher: StableHasher<Self>) -> Self;
}
// except according to those terms.
use bitvec::BitMatrix;
+use stable_hasher::{HashStable, StableHasher, StableHasherResult};
use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
use std::cell::RefCell;
use std::fmt::Debug;
use std::mem;
+
+
#[derive(Clone)]
pub struct TransitiveRelation<T: Debug + PartialEq> {
// List of elements. This is used to map from a T to a usize. We
}
}
+// Exhaustive destructuring below ensures that a newly added field
+// cannot silently escape stable hashing.
+impl<CTX, T> HashStable<CTX> for TransitiveRelation<T>
+ where T: HashStable<CTX> + PartialEq + Debug
+{
+ fn hash_stable<W: StableHasherResult>(&self,
+ hcx: &mut CTX,
+ hasher: &mut StableHasher<W>) {
+ // We are assuming here that the relation graph has been built in a
+ // deterministic way and we can just hash it the way it is.
+ let TransitiveRelation {
+ ref elements,
+ ref edges,
+ // "closure" is just a copy of the data above
+ closure: _
+ } = *self;
+
+ elements.hash_stable(hcx, hasher);
+ edges.hash_stable(hcx, hasher);
+ }
+}
+
+// Edges hash as their (source, target) pair; destructuring is
+// exhaustive so new fields must be considered here.
+impl<CTX> HashStable<CTX> for Edge {
+ fn hash_stable<W: StableHasherResult>(&self,
+ hcx: &mut CTX,
+ hasher: &mut StableHasher<W>) {
+ let Edge {
+ ref source,
+ ref target,
+ } = *self;
+
+ source.hash_stable(hcx, hasher);
+ target.hash_stable(hcx, hasher);
+ }
+}
+
+
+// An `Index` is a newtype over a single integer; hash the inner value.
+impl<CTX> HashStable<CTX> for Index {
+ fn hash_stable<W: StableHasherResult>(&self,
+ hcx: &mut CTX,
+ hasher: &mut StableHasher<W>) {
+ let Index(idx) = *self;
+ idx.hash_stable(hcx, hasher);
+ }
+}
+
+
#[test]
fn test_one_step() {
let mut relation = TransitiveRelation::new();
use rustc_privacy;
use rustc_plugin::registry::Registry;
use rustc_plugin as plugin;
-use rustc_passes::{ast_validation, no_asm, loops, consts, rvalues,
+use rustc_passes::{ast_validation, no_asm, loops, consts,
static_recursion, hir_stats, mir_stats};
use rustc_const_eval::check_match;
use super::Compilation;
let mut local_providers = ty::maps::Providers::default();
mir::provide(&mut local_providers);
rustc_privacy::provide(&mut local_providers);
+ borrowck::provide(&mut local_providers);
typeck::provide(&mut local_providers);
ty::provide(&mut local_providers);
reachable::provide(&mut local_providers);
let mut extern_providers = ty::maps::Providers::default();
cstore::provide(&mut extern_providers);
+ ty::provide_extern(&mut extern_providers);
TyCtxt::create_and_enter(sess,
local_providers,
"liveness checking",
|| middle::liveness::check_crate(tcx));
- time(time_passes,
- "rvalue checking",
- || rvalues::check_crate(tcx));
-
time(time_passes,
"MIR dump",
|| mir::mir_map::build_mir_for_crate(tcx));
// in stage 4 below.
passes.push_hook(box mir::transform::dump_mir::DumpMir);
passes.push_pass(box mir::transform::simplify::SimplifyCfg::new("initial"));
- passes.push_pass(box mir::transform::qualify_consts::QualifyAndPromoteConstants);
passes.push_pass(box mir::transform::type_check::TypeckMir);
+ passes.push_pass(box mir::transform::qualify_consts::QualifyAndPromoteConstants);
passes.push_pass(
box mir::transform::simplify_branches::SimplifyBranches::new("initial"));
passes.push_pass(box mir::transform::simplify::SimplifyCfg::new("qualify-consts"));
"serialize dep graph",
|| rustc_incremental::save_dep_graph(tcx,
&incremental_hashes_map,
+ &translation.metadata.hashes,
translation.link.crate_hash));
translation
}
outputs: &OutputFilenames) {
time(sess.time_passes(),
"linking",
- || link::link_binary(sess, trans, outputs, &trans.link.crate_name.as_str()));
+ || link::link_binary(sess, trans, outputs, &trans.crate_name.as_str()));
}
fn escape_dep_filename(filename: &str) -> String {
control.make_glob_map = resolve::MakeGlobMap::Yes;
}
+ if sess.print_fuel_crate.is_some() {
+ let old_callback = control.compilation_done.callback;
+ control.compilation_done.callback = box move |state| {
+ old_callback(state);
+ let sess = state.session;
+ println!("Fuel used by {}: {}",
+ sess.print_fuel_crate.as_ref().unwrap(),
+ sess.print_fuel.get());
+ }
+ }
control
}
}
hir::ItemStatic(..) |
hir::ItemFn(..) |
hir::ItemForeignMod(..) |
+ hir::ItemGlobalAsm(..) |
hir::ItemTy(..) => None,
hir::ItemEnum(..) |
pub render_span: Option<RenderSpan>,
}
+/// A diagnostic message composed of styled fragments, so that parts of
+/// an "expected/found" note can be rendered highlighted while the rest
+/// stays in the normal style.
+#[derive(PartialEq, Eq)]
+pub struct DiagnosticStyledString(pub Vec<StringPart>);
+
+impl DiagnosticStyledString {
+ /// Creates an empty styled string.
+ pub fn new() -> DiagnosticStyledString {
+ DiagnosticStyledString(vec![])
+ }
+ /// Appends a fragment rendered in the normal style.
+ pub fn push_normal<S: Into<String>>(&mut self, t: S) {
+ self.0.push(StringPart::Normal(t.into()));
+ }
+ /// Appends a fragment rendered highlighted.
+ pub fn push_highlighted<S: Into<String>>(&mut self, t: S) {
+ self.0.push(StringPart::Highlighted(t.into()));
+ }
+ /// Convenience constructor: a single normal-styled fragment.
+ pub fn normal<S: Into<String>>(t: S) -> DiagnosticStyledString {
+ DiagnosticStyledString(vec![StringPart::Normal(t.into())])
+ }
+
+ /// Convenience constructor: a single highlighted fragment.
+ pub fn highlighted<S: Into<String>>(t: S) -> DiagnosticStyledString {
+ DiagnosticStyledString(vec![StringPart::Highlighted(t.into())])
+ }
+
+ /// Returns the plain text of all fragments, styling stripped.
+ pub fn content(&self) -> String {
+ self.0.iter().map(|x| x.content()).collect::<String>()
+ }
+}
+
+/// One fragment of a `DiagnosticStyledString`: either plain text or a
+/// span to be emphasized when the diagnostic is rendered.
+#[derive(PartialEq, Eq)]
+pub enum StringPart {
+ Normal(String),
+ Highlighted(String),
+}
+
+impl StringPart {
+ /// Returns an owned copy of the fragment's text, ignoring its style.
+ pub fn content(&self) -> String {
+ match self {
+ &StringPart::Normal(ref s) | & StringPart::Highlighted(ref s) => s.to_owned()
+ }
+ }
+}
+
impl Diagnostic {
pub fn new(level: Level, message: &str) -> Self {
Diagnostic::new_with_code(level, None, message)
pub fn note_expected_found(&mut self,
label: &fmt::Display,
- expected: &fmt::Display,
- found: &fmt::Display)
+ expected: DiagnosticStyledString,
+ found: DiagnosticStyledString)
-> &mut Self
{
self.note_expected_found_extra(label, expected, found, &"", &"")
pub fn note_expected_found_extra(&mut self,
label: &fmt::Display,
- expected: &fmt::Display,
- found: &fmt::Display,
+ expected: DiagnosticStyledString,
+ found: DiagnosticStyledString,
expected_extra: &fmt::Display,
found_extra: &fmt::Display)
-> &mut Self
{
+ let mut msg: Vec<_> = vec![(format!("expected {} `", label), Style::NoStyle)];
+ msg.extend(expected.0.iter()
+ .map(|x| match *x {
+ StringPart::Normal(ref s) => (s.to_owned(), Style::NoStyle),
+ StringPart::Highlighted(ref s) => (s.to_owned(), Style::Highlight),
+ }));
+ msg.push((format!("`{}\n", expected_extra), Style::NoStyle));
+ msg.push((format!(" found {} `", label), Style::NoStyle));
+ msg.extend(found.0.iter()
+ .map(|x| match *x {
+ StringPart::Normal(ref s) => (s.to_owned(), Style::NoStyle),
+ StringPart::Highlighted(ref s) => (s.to_owned(), Style::Highlight),
+ }));
+ msg.push((format!("`{}", found_extra), Style::NoStyle));
+
// For now, just attach these as notes
- self.highlighted_note(vec![
- (format!("expected {} `", label), Style::NoStyle),
- (format!("{}", expected), Style::Highlight),
- (format!("`{}\n", expected_extra), Style::NoStyle),
- (format!(" found {} `", label), Style::NoStyle),
- (format!("{}", found), Style::Highlight),
- (format!("`{}", found_extra), Style::NoStyle),
- ]);
+ self.highlighted_note(msg);
self
}
// except according to those terms.
use Diagnostic;
+use DiagnosticStyledString;
+
use Level;
use Handler;
use std::fmt::{self, Debug};
forward!(pub fn note_expected_found(&mut self,
label: &fmt::Display,
- expected: &fmt::Display,
- found: &fmt::Display)
+ expected: DiagnosticStyledString,
+ found: DiagnosticStyledString)
-> &mut Self);
forward!(pub fn note_expected_found_extra(&mut self,
label: &fmt::Display,
- expected: &fmt::Display,
- found: &fmt::Display,
+ expected: DiagnosticStyledString,
+ found: DiagnosticStyledString,
expected_extra: &fmt::Display,
found_extra: &fmt::Display)
-> &mut Self);
}
}
-pub use diagnostic::{Diagnostic, SubDiagnostic};
+pub use diagnostic::{Diagnostic, SubDiagnostic, DiagnosticStyledString, StringPart};
pub use diagnostic_builder::DiagnosticBuilder;
/// A handler deals with errors; certain errors
use rustc::dep_graph::{DepNode, WorkProduct, WorkProductId};
use rustc::hir::def_id::DefIndex;
use rustc::ich::Fingerprint;
+use rustc::middle::cstore::EncodedMetadataHash;
use std::sync::Arc;
use rustc_data_structures::fx::FxHashMap;
/// where `X` refers to some item in this crate. That `X` will be
/// a `DefPathIndex` that gets retracted to the current `DefId`
/// (matching the one found in this structure).
- pub hashes: Vec<SerializedMetadataHash>,
+ pub hashes: Vec<EncodedMetadataHash>,
/// For each DefIndex (as it occurs in SerializedMetadataHash), this
/// map stores the DefPathIndex (as it occurs in DefIdDirectory), so
/// the DefIndex.
pub index_map: FxHashMap<DefIndex, DefPathIndex>
}
-
-/// The hash for some metadata that (when saving) will be exported
-/// from this crate, or which (when importing) was exported by an
-/// upstream crate.
-#[derive(Debug, RustcEncodable, RustcDecodable)]
-pub struct SerializedMetadataHash {
- pub def_index: DefIndex,
-
- /// the hash itself, computed by `calculate_item_hash`
- pub hash: Fingerprint,
-}
}
}
-pub fn check_dirty_clean_metadata<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- prev_metadata_hashes: &FxHashMap<DefId, Fingerprint>,
- current_metadata_hashes: &FxHashMap<DefId, Fingerprint>) {
+pub fn check_dirty_clean_metadata<'a, 'tcx>(
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ prev_metadata_hashes: &FxHashMap<DefId, Fingerprint>,
+ current_metadata_hashes: &FxHashMap<DefId, Fingerprint>)
+{
if !tcx.sess.opts.debugging_opts.query_dep_graph {
return;
}
current_metadata_hashes: current_metadata_hashes,
checked_attrs: FxHashSet(),
};
- krate.visit_all_item_likes(&mut dirty_clean_visitor);
+ intravisit::walk_crate(&mut dirty_clean_visitor, krate);
let mut all_attrs = FindAllAttrs {
tcx: tcx,
});
}
-pub struct DirtyCleanMetadataVisitor<'a, 'tcx:'a, 'm> {
+pub struct DirtyCleanMetadataVisitor<'a, 'tcx: 'a, 'm> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
prev_metadata_hashes: &'m FxHashMap<DefId, Fingerprint>,
current_metadata_hashes: &'m FxHashMap<DefId, Fingerprint>,
checked_attrs: FxHashSet<ast::AttrId>,
}
-impl<'a, 'tcx, 'm> ItemLikeVisitor<'tcx> for DirtyCleanMetadataVisitor<'a, 'tcx, 'm> {
+impl<'a, 'tcx, 'm> intravisit::Visitor<'tcx> for DirtyCleanMetadataVisitor<'a, 'tcx, 'm> {
+
+ fn nested_visit_map<'this>(&'this mut self) -> intravisit::NestedVisitorMap<'this, 'tcx> {
+ intravisit::NestedVisitorMap::All(&self.tcx.hir)
+ }
+
fn visit_item(&mut self, item: &'tcx hir::Item) {
self.check_item(item.id, item.span);
+ intravisit::walk_item(self, item);
+ }
- if let hir::ItemEnum(ref def, _) = item.node {
- for v in &def.variants {
- self.check_item(v.node.data.id(), v.span);
- }
+ fn visit_variant_data(&mut self,
+ variant_data: &'tcx hir::VariantData,
+ _: ast::Name,
+ _: &'tcx hir::Generics,
+ _parent_id: ast::NodeId,
+ span: Span) {
+ if self.tcx.hir.find(variant_data.id()).is_some() {
+ // VariantData that represent structs or tuples don't have a
+ // separate entry in the HIR map and checking them would error,
+ // so only check if this is an enum or union variant.
+ self.check_item(variant_data.id(), span);
}
+
+ intravisit::walk_struct_def(self, variant_data);
}
- fn visit_trait_item(&mut self, item: &hir::TraitItem) {
+ fn visit_trait_item(&mut self, item: &'tcx hir::TraitItem) {
self.check_item(item.id, item.span);
+ intravisit::walk_trait_item(self, item);
}
- fn visit_impl_item(&mut self, item: &hir::ImplItem) {
+ fn visit_impl_item(&mut self, item: &'tcx hir::ImplItem) {
self.check_item(item.id, item.span);
+ intravisit::walk_impl_item(self, item);
+ }
+
+ fn visit_foreign_item(&mut self, i: &'tcx hir::ForeignItem) {
+ self.check_item(i.id, i.span);
+ intravisit::walk_foreign_item(self, i);
+ }
+
+ fn visit_struct_field(&mut self, s: &'tcx hir::StructField) {
+ self.check_item(s.id, s.span);
+ intravisit::walk_struct_field(self, s);
}
}
for attr in self.tcx.get_attrs(def_id).iter() {
if attr.check_name(ATTR_DIRTY_METADATA) {
if check_config(self.tcx, attr) {
- self.checked_attrs.insert(attr.id);
- self.assert_state(false, def_id, item_span);
+ if self.checked_attrs.insert(attr.id) {
+ self.assert_state(false, def_id, item_span);
+ }
}
} else if attr.check_name(ATTR_CLEAN_METADATA) {
if check_config(self.tcx, attr) {
- self.checked_attrs.insert(attr.id);
- self.assert_state(true, def_id, item_span);
+ if self.checked_attrs.insert(attr.id) {
+ self.assert_state(true, def_id, item_span);
+ }
}
}
}
pub fn new(query: &'q DepGraphQuery<DefId>, hcx: &mut HashContext) -> Self {
let tcx = hcx.tcx;
- let collect_for_metadata = tcx.sess.opts.debugging_opts.incremental_cc ||
- tcx.sess.opts.debugging_opts.query_dep_graph;
-
// Find the set of "start nodes". These are nodes that we will
// possibly query later.
let is_output = |node: &DepNode<DefId>| -> bool {
match *node {
DepNode::WorkProduct(_) => true,
- DepNode::MetaData(ref def_id) => collect_for_metadata && def_id.is_local(),
-
+ DepNode::MetaData(ref def_id) => {
+ // We do *not* create dep-nodes for the current crate's
+ // metadata anymore, just for metadata that we import/read
+ // from other crates.
+ debug_assert!(!def_id.is_local());
+ false
+ }
// if -Z query-dep-graph is passed, save more extended data
// to enable better unit testing
DepNode::TypeckTables(_) |
.or_insert_with(|| hcx.hash(input).unwrap());
}
+ if tcx.sess.opts.debugging_opts.query_dep_graph {
+ // Not all inputs might have been reachable from an output node,
+ // but we still want their hash for our unit tests.
+ let hir_nodes = query.graph.all_nodes().iter().filter_map(|node| {
+ match node.data {
+ DepNode::Hir(_) => Some(&node.data),
+ _ => None,
+ }
+ });
+
+ for node in hir_nodes {
+ hashes.entry(node)
+ .or_insert_with(|| hcx.hash(node).unwrap());
+ }
+ }
+
let bootstrap_outputs: Vec<&'q DepNode<DefId>> =
(0 .. graph.len_nodes())
.map(NodeIndex)
use rustc::hir::def_id::DefId;
use rustc::hir::svh::Svh;
use rustc::ich::Fingerprint;
+use rustc::middle::cstore::EncodedMetadataHash;
use rustc::session::Session;
use rustc::ty::TyCtxt;
use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::graph::{NodeIndex, INCOMING};
use rustc_serialize::Encodable as RustcEncodable;
use rustc_serialize::opaque::Encoder;
-use std::hash::Hash;
use std::io::{self, Cursor, Write};
use std::fs::{self, File};
use std::path::PathBuf;
use super::dirty_clean;
use super::file_format;
use super::work_product;
-use calculate_svh::IchHasher;
pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
incremental_hashes_map: &IncrementalHashesMap,
+ metadata_hashes: &[EncodedMetadataHash],
svh: Svh) {
debug!("save_dep_graph()");
let _ignore = tcx.dep_graph.in_ignore();
let preds = Predecessors::new(&query, &mut hcx);
let mut current_metadata_hashes = FxHashMap();
+ // IMPORTANT: We are saving the metadata hashes *before* the dep-graph,
+ // since metadata-encoding might add new entries to the
+ // DefIdDirectory (which is saved in the dep-graph file).
if sess.opts.debugging_opts.incremental_cc ||
sess.opts.debugging_opts.query_dep_graph {
- // IMPORTANT: We are saving the metadata hashes *before* the dep-graph,
- // since metadata-encoding might add new entries to the
- // DefIdDirectory (which is saved in the dep-graph file).
save_in(sess,
metadata_hash_export_path(sess),
|e| encode_metadata_hashes(tcx,
svh,
- &preds,
+ metadata_hashes,
&mut builder,
&mut current_metadata_hashes,
e));
pub fn encode_metadata_hashes(tcx: TyCtxt,
svh: Svh,
- preds: &Predecessors,
+ metadata_hashes: &[EncodedMetadataHash],
builder: &mut DefIdDirectoryBuilder,
current_metadata_hashes: &mut FxHashMap<DefId, Fingerprint>,
encoder: &mut Encoder)
-> io::Result<()> {
- // For each `MetaData(X)` node where `X` is local, accumulate a
- // hash. These are the metadata items we export. Downstream
- // crates will want to see a hash that tells them whether we might
- // have changed the metadata for a given item since they last
- // compiled.
- //
- // (I initially wrote this with an iterator, but it seemed harder to read.)
let mut serialized_hashes = SerializedMetadataHashes {
- hashes: vec![],
+ hashes: metadata_hashes.to_vec(),
index_map: FxHashMap()
};
- for (index, target) in preds.reduced_graph.all_nodes().iter().enumerate() {
- let index = NodeIndex(index);
- let def_id = match *target.data {
- DepNode::MetaData(def_id) if def_id.is_local() => def_id,
- _ => continue,
- };
-
- // To create the hash for each item `X`, we don't hash the raw
- // bytes of the metadata (though in principle we
- // could). Instead, we walk the predecessors of `MetaData(X)`
- // from the dep-graph. This corresponds to all the inputs that
- // were read to construct the metadata. To create the hash for
- // the metadata, we hash (the hash of) all of those inputs.
- debug!("save: computing metadata hash for {:?}", def_id);
-
- // Create a vector containing a pair of (source-id, hash).
- // The source-id is stored as a `DepNode<u64>`, where the u64
- // is the det. hash of the def-path. This is convenient
- // because we can sort this to get a stable ordering across
- // compilations, even if the def-ids themselves have changed.
- let mut hashes: Vec<(DepNode<u64>, Fingerprint)> =
- preds.reduced_graph
- .depth_traverse(index, INCOMING)
- .map(|index| preds.reduced_graph.node_data(index))
- .filter(|dep_node| HashContext::is_hashable(dep_node))
- .map(|dep_node| {
- let hash_dep_node = dep_node.map_def(|&def_id| Some(tcx.def_path_hash(def_id)))
- .unwrap();
- let hash = preds.hashes[dep_node];
- (hash_dep_node, hash)
- })
- .collect();
-
- hashes.sort();
- let mut state = IchHasher::new();
- hashes.hash(&mut state);
- let hash = state.finish();
-
- debug!("save: metadata hash for {:?} is {}", def_id, hash);
-
- if tcx.sess.opts.debugging_opts.incremental_dump_hash {
- println!("metadata hash for {:?} is {}", def_id, hash);
- for pred_index in preds.reduced_graph.depth_traverse(index, INCOMING) {
- let dep_node = preds.reduced_graph.node_data(pred_index);
- if HashContext::is_hashable(&dep_node) {
- println!("metadata hash for {:?} depends on {:?} with hash {}",
- def_id, dep_node, preds.hashes[dep_node]);
- }
- }
- }
-
- serialized_hashes.hashes.push(SerializedMetadataHash {
- def_index: def_id.index,
- hash: hash,
- });
- }
-
if tcx.sess.opts.debugging_opts.query_dep_graph {
for serialized_hash in &serialized_hashes.hashes {
let def_id = DefId::local(serialized_hash.def_index);
}
match def.adt_kind() {
AdtKind::Struct => {
- if !def.repr.c {
+ if !def.repr.c() {
return FfiUnsafe("found struct without foreign-function-safe \
representation annotation in foreign module, \
consider adding a #[repr(C)] attribute to the type");
if all_phantom { FfiPhantom } else { FfiSafe }
}
AdtKind::Union => {
- if !def.repr.c {
+ if !def.repr.c() {
return FfiUnsafe("found union without foreign-function-safe \
representation annotation in foreign module, \
consider adding a #[repr(C)] attribute to the type");
// Check for a repr() attribute to specify the size of the
// discriminant.
- if !def.repr.c && def.repr.int.is_none() {
+ if !def.repr.c() && def.repr.int.is_none() {
// Special-case types like `Option<extern fn()>`.
if !is_repr_nullable_ptr(cx, def, substs) {
return FfiUnsafe("found enum without foreign-function-safe \
/// See Module::setModuleInlineAsm.
pub fn LLVMSetModuleInlineAsm(M: ModuleRef, Asm: *const c_char);
+ pub fn LLVMRustAppendModuleInlineAsm(M: ModuleRef, Asm: *const c_char);
/// See llvm::LLVMTypeKind::getTypeID.
pub fn LLVMRustGetTypeKind(Ty: TypeRef) -> TypeKind;
use rustc::hir::intravisit::{Visitor, NestedVisitorMap};
-use encoder::EncodeContext;
+use index_builder::EntryBuilder;
use schema::*;
use rustc::hir;
use rustc::ty;
-use rustc_serialize::Encodable;
-
#[derive(RustcEncodable, RustcDecodable)]
pub struct Ast<'tcx> {
pub body: Lazy<hir::Body>,
pub rvalue_promotable_to_static: bool,
}
-impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
+// Generate a stable-hash (ICH) impl for `Ast`, covering every field;
+// listing them explicitly here means adding a field without updating
+// this invocation is a compile error.
+impl_stable_hash_for!(struct Ast<'tcx> {
+ body,
+ tables,
+ nested_bodies,
+ rvalue_promotable_to_static
+});
+
+impl<'a, 'b, 'tcx> EntryBuilder<'a, 'b, 'tcx> {
pub fn encode_body(&mut self, body_id: hir::BodyId) -> Lazy<Ast<'tcx>> {
let body = self.tcx.hir.body(body_id);
let lazy_body = self.lazy(body);
let tables = self.tcx.body_tables(body_id);
let lazy_tables = self.lazy(tables);
- let nested_pos = self.position();
- let nested_count = {
- let mut visitor = NestedBodyEncodingVisitor {
- ecx: self,
- count: 0,
- };
- visitor.visit_body(body);
- visitor.count
+ let mut visitor = NestedBodyCollector {
+ tcx: self.tcx,
+ bodies_found: Vec::new(),
};
+ visitor.visit_body(body);
+ let lazy_nested_bodies = self.lazy_seq_ref_from_slice(&visitor.bodies_found);
let rvalue_promotable_to_static =
self.tcx.rvalue_promotable_to_static.borrow()[&body.value.id];
self.lazy(&Ast {
body: lazy_body,
tables: lazy_tables,
- nested_bodies: LazySeq::with_position_and_length(nested_pos, nested_count),
+ nested_bodies: lazy_nested_bodies,
rvalue_promotable_to_static: rvalue_promotable_to_static
})
}
}
-struct NestedBodyEncodingVisitor<'a, 'b: 'a, 'tcx: 'b> {
- ecx: &'a mut EncodeContext<'b, 'tcx>,
- count: usize,
+// Visitor state that accumulates references to nested HIR bodies so
+// they can be lazily encoded as one slice afterwards, instead of being
+// streamed into the encoder while visiting (as the old
+// `NestedBodyEncodingVisitor` did).
+struct NestedBodyCollector<'a, 'tcx: 'a> {
+ tcx: ty::TyCtxt<'a, 'tcx, 'tcx>,
+ bodies_found: Vec<&'tcx hir::Body>,
}
-impl<'a, 'b, 'tcx> Visitor<'tcx> for NestedBodyEncodingVisitor<'a, 'b, 'tcx> {
+impl<'a, 'tcx: 'a> Visitor<'tcx> for NestedBodyCollector<'a, 'tcx> {
 fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
 NestedVisitorMap::None
 }
 fn visit_nested_body(&mut self, body: hir::BodyId) {
- let body = self.ecx.tcx.hir.body(body);
- body.encode(self.ecx).unwrap();
- self.count += 1;
-
+ let body = self.tcx.hir.body(body);
+ // Record the body instead of encoding it immediately; the caller
+ // encodes the whole collected slice in one step.
+ self.bodies_found.push(body);
 self.visit_body(body);
 }
}
Err(err) => self.sess.span_fatal(span, &err),
};
- let sym = self.sess.generate_derive_registrar_symbol(&root.hash,
+ let sym = self.sess.generate_derive_registrar_symbol(root.disambiguator,
root.macro_derive_registrar.unwrap());
let registrar = unsafe {
let sym = match lib.symbol(&sym) {
/// Look for a plugin registrar. Returns library path, crate
/// SVH and DefIndex of the registrar function.
pub fn find_plugin_registrar(&mut self, span: Span, name: &str)
- -> Option<(PathBuf, Svh, DefIndex)> {
+ -> Option<(PathBuf, Symbol, DefIndex)> {
let ekrate = self.read_extension_crate(span, &ExternCrateInfo {
name: Symbol::intern(name),
ident: Symbol::intern(name),
let root = ekrate.metadata.get_root();
match (ekrate.dylib.as_ref(), root.plugin_registrar_fn) {
(Some(dylib), Some(reg)) => {
- Some((dylib.to_path_buf(), root.hash, reg))
+ Some((dylib.to_path_buf(), root.disambiguator, reg))
}
(None, Some(_)) => {
span_err!(self.sess, span, E0457,
use schema;
use rustc::dep_graph::DepTrackingMapConfig;
-use rustc::middle::cstore::{CrateStore, CrateSource, LibSource, DepKind, ExternCrate};
-use rustc::middle::cstore::{NativeLibrary, LinkMeta, LinkagePreference, LoadedMacro};
+use rustc::middle::cstore::{CrateStore, CrateSource, LibSource, DepKind,
+ ExternCrate, NativeLibrary, LinkMeta,
+ LinkagePreference, LoadedMacro, EncodedMetadata};
use rustc::hir::def::{self, Def};
use rustc::middle::lang_items;
use rustc::session::Session;
fn encode_metadata<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
link_meta: &LinkMeta,
- reachable: &NodeSet) -> Vec<u8>
+ reachable: &NodeSet)
+ -> EncodedMetadata
{
encoder::encode_metadata(tcx, self, link_meta, reachable)
}
EntryKind::Trait(_) => Def::Trait(did),
EntryKind::Enum(..) => Def::Enum(did),
EntryKind::MacroDef(_) => Def::Macro(did, MacroKind::Bang),
+ EntryKind::GlobalAsm => Def::GlobalAsm(did),
EntryKind::ForeignMod |
EntryKind::Impl(_) |
use index::Index;
use schema::*;
-use rustc::middle::cstore::{LinkMeta, LinkagePreference, NativeLibrary};
-use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefIndex, DefId};
+use rustc::middle::cstore::{LinkMeta, LinkagePreference, NativeLibrary,
+ EncodedMetadata, EncodedMetadataHash};
+use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefIndex, DefId, LOCAL_CRATE};
use rustc::hir::map::definitions::DefPathTable;
use rustc::middle::dependency_format::Linkage;
use rustc::middle::lang_items;
use rustc::hir::intravisit::{Visitor, NestedVisitorMap};
use rustc::hir::intravisit;
-use super::index_builder::{FromId, IndexBuilder, Untracked};
+use super::index_builder::{FromId, IndexBuilder, Untracked, EntryBuilder};
pub struct EncodeContext<'a, 'tcx: 'a> {
opaque: opaque::Encoder<'a>,
lazy_state: LazyState,
type_shorthands: FxHashMap<Ty<'tcx>, usize>,
predicate_shorthands: FxHashMap<ty::Predicate<'tcx>, usize>,
+
+ pub metadata_hashes: Vec<EncodedMetadataHash>,
}
macro_rules! encoder_methods {
})
}
- fn lazy_seq<I, T>(&mut self, iter: I) -> LazySeq<T>
+ pub fn lazy_seq<I, T>(&mut self, iter: I) -> LazySeq<T>
where I: IntoIterator<Item = T>,
T: Encodable
{
})
}
- fn lazy_seq_ref<'b, I, T>(&mut self, iter: I) -> LazySeq<T>
+ pub fn lazy_seq_ref<'b, I, T>(&mut self, iter: I) -> LazySeq<T>
where I: IntoIterator<Item = &'b T>,
T: 'b + Encodable
{
Ok(())
}
+}
+impl<'a, 'b: 'a, 'tcx: 'b> EntryBuilder<'a, 'b, 'tcx> {
fn encode_item_variances(&mut self, def_id: DefId) -> LazySeq<ty::Variance> {
+ debug!("EntryBuilder::encode_item_variances({:?})", def_id);
let tcx = self.tcx;
- self.lazy_seq(tcx.item_variances(def_id).iter().cloned())
+ self.lazy_seq_from_slice(&tcx.item_variances(def_id))
}
fn encode_item_type(&mut self, def_id: DefId) -> Lazy<Ty<'tcx>> {
let tcx = self.tcx;
- self.lazy(&tcx.item_type(def_id))
+ let ty = tcx.item_type(def_id);
+ debug!("EntryBuilder::encode_item_type({:?}) => {:?}", def_id, ty);
+ self.lazy(&ty)
}
/// Encode data for the given variant of the given ADT. The
let def = tcx.lookup_adt_def(enum_did);
let variant = &def.variants[index];
let def_id = variant.did;
+ debug!("EntryBuilder::encode_enum_variant_info({:?})", def_id);
let data = VariantData {
ctor_kind: variant.ctor_kind,
-> Entry<'tcx> {
let tcx = self.tcx;
let def_id = tcx.hir.local_def_id(id);
+ debug!("EntryBuilder::encode_info_for_mod({:?})", def_id);
let data = ModData {
reexports: match tcx.export_map.get(&id) {
- Some(exports) if *vis == hir::Public => self.lazy_seq_ref(exports),
+ Some(exports) if *vis == hir::Public => {
+ self.lazy_seq_from_slice(exports.as_slice())
+ }
_ => LazySeq::empty(),
},
};
for (variant_index, variant) in def.variants.iter().enumerate() {
for (field_index, field) in variant.fields.iter().enumerate() {
self.record(field.did,
- EncodeContext::encode_field,
+ EntryBuilder::encode_field,
(adt_def_id, Untracked((variant_index, field_index))));
}
}
}
}
-impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
+impl<'a, 'b: 'a, 'tcx: 'b> EntryBuilder<'a, 'b, 'tcx> {
/// Encode data for the given field of the given variant of the
/// given ADT. The indices of the variant/field are untracked:
/// this is ok because we will have to lookup the adt-def by its
let field = &variant.fields[field_index];
let def_id = field.did;
+ debug!("EntryBuilder::encode_field({:?})", def_id);
+
let variant_id = tcx.hir.as_local_node_id(variant.did).unwrap();
let variant_data = tcx.hir.expect_variant_data(variant_id);
}
fn encode_struct_ctor(&mut self, (adt_def_id, def_id): (DefId, DefId)) -> Entry<'tcx> {
+ debug!("EntryBuilder::encode_struct_ctor({:?})", def_id);
let tcx = self.tcx;
let variant = tcx.lookup_adt_def(adt_def_id).struct_variant();
}
fn encode_generics(&mut self, def_id: DefId) -> Lazy<ty::Generics> {
+ debug!("EntryBuilder::encode_generics({:?})", def_id);
let tcx = self.tcx;
self.lazy(tcx.item_generics(def_id))
}
fn encode_predicates(&mut self, def_id: DefId) -> Lazy<ty::GenericPredicates<'tcx>> {
+ debug!("EntryBuilder::encode_predicates({:?})", def_id);
let tcx = self.tcx;
self.lazy(&tcx.item_predicates(def_id))
}
fn encode_info_for_trait_item(&mut self, def_id: DefId) -> Entry<'tcx> {
+ debug!("EntryBuilder::encode_info_for_trait_item({:?})", def_id);
let tcx = self.tcx;
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
}
fn encode_info_for_impl_item(&mut self, def_id: DefId) -> Entry<'tcx> {
+ debug!("EntryBuilder::encode_info_for_impl_item({:?})", def_id);
let node_id = self.tcx.hir.as_local_node_id(def_id).unwrap();
let ast_item = self.tcx.hir.expect_impl_item(node_id);
let impl_item = self.tcx.associated_item(def_id);
}
fn encode_mir(&mut self, def_id: DefId) -> Option<Lazy<mir::Mir<'tcx>>> {
+ debug!("EntryBuilder::encode_mir({:?})", def_id);
self.tcx.maps.mir.borrow().get(&def_id).map(|mir| self.lazy(&*mir.borrow()))
}
// Encodes the inherent implementations of a structure, enumeration, or trait.
fn encode_inherent_implementations(&mut self, def_id: DefId) -> LazySeq<DefIndex> {
+ debug!("EntryBuilder::encode_inherent_implementations({:?})", def_id);
match self.tcx.maps.inherent_impls.borrow().get(&def_id) {
None => LazySeq::empty(),
Some(implementations) => {
}
fn encode_stability(&mut self, def_id: DefId) -> Option<Lazy<attr::Stability>> {
+ debug!("EntryBuilder::encode_stability({:?})", def_id);
self.tcx.lookup_stability(def_id).map(|stab| self.lazy(stab))
}
fn encode_deprecation(&mut self, def_id: DefId) -> Option<Lazy<attr::Deprecation>> {
+ debug!("EntryBuilder::encode_deprecation({:?})", def_id);
self.tcx.lookup_deprecation(def_id).map(|depr| self.lazy(&depr))
}
fn encode_info_for_item(&mut self, (def_id, item): (DefId, &'tcx hir::Item)) -> Entry<'tcx> {
let tcx = self.tcx;
- debug!("encoding info for item at {}",
- tcx.sess.codemap().span_to_string(item.span));
+ debug!("EntryBuilder::encode_info_for_item({:?})", def_id);
let kind = match item.node {
hir::ItemStatic(_, hir::MutMutable, _) => EntryKind::MutStatic,
return self.encode_info_for_mod(FromId(item.id, (m, &item.attrs, &item.vis)));
}
hir::ItemForeignMod(_) => EntryKind::ForeignMod,
+ hir::ItemGlobalAsm(..) => EntryKind::GlobalAsm,
hir::ItemTy(..) => EntryKind::Type,
hir::ItemEnum(..) => EntryKind::Enum(get_repr_options(&tcx, def_id)),
hir::ItemStruct(ref struct_def, _) => {
hir::ItemFn(..) |
hir::ItemMod(..) |
hir::ItemForeignMod(..) |
+ hir::ItemGlobalAsm(..) |
hir::ItemExternCrate(..) |
hir::ItemUse(..) |
hir::ItemDefaultImpl(..) |
let def = self.tcx.lookup_adt_def(def_id);
for (i, variant) in def.variants.iter().enumerate() {
self.record(variant.did,
- EncodeContext::encode_enum_variant_info,
+ EntryBuilder::encode_enum_variant_info,
(def_id, Untracked(i)));
}
}
if !struct_def.is_struct() {
let ctor_def_id = self.tcx.hir.local_def_id(struct_def.id());
self.record(ctor_def_id,
- EncodeContext::encode_struct_ctor,
+ EntryBuilder::encode_struct_ctor,
(def_id, ctor_def_id));
}
}
hir::ItemImpl(..) => {
for &trait_item_def_id in self.tcx.associated_item_def_ids(def_id).iter() {
self.record(trait_item_def_id,
- EncodeContext::encode_info_for_impl_item,
+ EntryBuilder::encode_info_for_impl_item,
trait_item_def_id);
}
}
hir::ItemTrait(..) => {
for &item_def_id in self.tcx.associated_item_def_ids(def_id).iter() {
self.record(item_def_id,
- EncodeContext::encode_info_for_trait_item,
+ EntryBuilder::encode_info_for_trait_item,
item_def_id);
}
}
}
}
-impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
+impl<'a, 'b: 'a, 'tcx: 'b> EntryBuilder<'a, 'b, 'tcx> {
fn encode_info_for_foreign_item(&mut self,
(def_id, nitem): (DefId, &hir::ForeignItem))
-> Entry<'tcx> {
let tcx = self.tcx;
- debug!("writing foreign item {}", tcx.node_path_str(nitem.id));
+ debug!("EntryBuilder::encode_info_for_foreign_item({:?})", def_id);
let kind = match nitem.node {
hir::ForeignItemFn(_, ref names, _) => {
match item.node {
hir::ItemExternCrate(_) |
hir::ItemUse(..) => (), // ignore these
- _ => self.index.record(def_id, EncodeContext::encode_info_for_item, (def_id, item)),
+ _ => self.index.record(def_id, EntryBuilder::encode_info_for_item, (def_id, item)),
}
self.index.encode_addl_info_for_item(item);
}
intravisit::walk_foreign_item(self, ni);
let def_id = self.index.tcx.hir.local_def_id(ni.id);
self.index.record(def_id,
- EncodeContext::encode_info_for_foreign_item,
+ EntryBuilder::encode_info_for_foreign_item,
(def_id, ni));
}
fn visit_generics(&mut self, generics: &'tcx hir::Generics) {
}
fn visit_macro_def(&mut self, macro_def: &'tcx hir::MacroDef) {
let def_id = self.index.tcx.hir.local_def_id(macro_def.id);
- self.index.record(def_id, EncodeContext::encode_info_for_macro_def, macro_def);
+ self.index.record(def_id, EntryBuilder::encode_info_for_macro_def, macro_def);
}
}
for ty_param in &generics.ty_params {
let def_id = self.tcx.hir.local_def_id(ty_param.id);
let has_default = Untracked(ty_param.default.is_some());
- self.record(def_id, EncodeContext::encode_info_for_ty_param, (def_id, has_default));
+ self.record(def_id, EntryBuilder::encode_info_for_ty_param, (def_id, has_default));
}
}
fn encode_info_for_ty(&mut self, ty: &hir::Ty) {
if let hir::TyImplTrait(_) = ty.node {
let def_id = self.tcx.hir.local_def_id(ty.id);
- self.record(def_id, EncodeContext::encode_info_for_anon_ty, def_id);
+ self.record(def_id, EntryBuilder::encode_info_for_anon_ty, def_id);
}
}
match expr.node {
hir::ExprClosure(..) => {
let def_id = self.tcx.hir.local_def_id(expr.id);
- self.record(def_id, EncodeContext::encode_info_for_closure, def_id);
+ self.record(def_id, EntryBuilder::encode_info_for_closure, def_id);
}
_ => {}
}
}
}
-impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
+impl<'a, 'b: 'a, 'tcx: 'b> EntryBuilder<'a, 'b, 'tcx> {
fn encode_info_for_ty_param(&mut self,
(def_id, Untracked(has_default)): (DefId, Untracked<bool>))
-> Entry<'tcx> {
+ debug!("EntryBuilder::encode_info_for_ty_param({:?})", def_id);
let tcx = self.tcx;
Entry {
kind: EntryKind::Type,
}
fn encode_info_for_anon_ty(&mut self, def_id: DefId) -> Entry<'tcx> {
+ debug!("EntryBuilder::encode_info_for_anon_ty({:?})", def_id);
let tcx = self.tcx;
Entry {
kind: EntryKind::Type,
}
fn encode_info_for_closure(&mut self, def_id: DefId) -> Entry<'tcx> {
+ debug!("EntryBuilder::encode_info_for_closure({:?})", def_id);
let tcx = self.tcx;
let data = ClosureData {
}
}
+ fn encode_attributes(&mut self, attrs: &[ast::Attribute]) -> LazySeq<ast::Attribute> {
+ // NOTE: This must use lazy_seq_from_slice(), not lazy_seq() because
+ // we rely on the HashStable specialization for [Attribute]
+ // to properly filter things out.
+ self.lazy_seq_from_slice(attrs)
+ }
+}
+
+impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
fn encode_info_for_items(&mut self) -> Index {
let krate = self.tcx.hir.krate();
let mut index = IndexBuilder::new(self);
index.record(DefId::local(CRATE_DEF_INDEX),
- EncodeContext::encode_info_for_mod,
+ EntryBuilder::encode_info_for_mod,
FromId(CRATE_NODE_ID, (&krate.module, &krate.attrs, &hir::Public)));
let mut visitor = EncodeVisitor { index: index };
krate.visit_all_item_likes(&mut visitor.as_deep_visitor());
visitor.index.into_items()
}
- fn encode_attributes(&mut self, attrs: &[ast::Attribute]) -> LazySeq<ast::Attribute> {
- self.lazy_seq_ref(attrs)
- }
-
fn encode_crate_deps(&mut self) -> LazySeq<CrateDep> {
fn get_ordered_deps(cstore: &cstore::CStore) -> Vec<(CrateNum, Rc<cstore::CrateMetadata>)> {
// Pull the cnums and name,vers,hash out of cstore
None => LazySeq::empty(),
}
}
+}
+impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
fn encode_crate_root(&mut self) -> Lazy<CrateRoot> {
let mut i = self.position();
let crate_deps = self.encode_crate_deps();
let link_meta = self.link_meta;
let is_proc_macro = tcx.sess.crate_types.borrow().contains(&CrateTypeProcMacro);
let root = self.lazy(&CrateRoot {
- name: link_meta.crate_name,
+ name: tcx.crate_name(LOCAL_CRATE),
triple: tcx.sess.opts.target_triple.clone(),
hash: link_meta.crate_hash,
disambiguator: tcx.sess.local_crate_disambiguator(),
cstore: &cstore::CStore,
link_meta: &LinkMeta,
exported_symbols: &NodeSet)
- -> Vec<u8> {
+ -> EncodedMetadata
+{
let mut cursor = Cursor::new(vec![]);
cursor.write_all(METADATA_HEADER).unwrap();
// Will be filed with the root position after encoding everything.
cursor.write_all(&[0, 0, 0, 0]).unwrap();
- let root = {
+ let (root, metadata_hashes) = {
let mut ecx = EncodeContext {
opaque: opaque::Encoder::new(&mut cursor),
tcx: tcx,
lazy_state: LazyState::NoNode,
type_shorthands: Default::default(),
predicate_shorthands: Default::default(),
+ metadata_hashes: Vec::new(),
};
// Encode the rustc version string in a predictable location.
// Encode all the entries and extra information in the crate,
// culminating in the `CrateRoot` which points to all of it.
- ecx.encode_crate_root()
+ let root = ecx.encode_crate_root();
+ (root, ecx.metadata_hashes)
};
let mut result = cursor.into_inner();
result[header + 2] = (pos >> 8) as u8;
result[header + 3] = (pos >> 0) as u8;
- result
+ EncodedMetadata {
+ raw_data: result,
+ hashes: metadata_hashes,
+ }
}
pub fn get_repr_options<'a, 'tcx, 'gcx>(tcx: &TyCtxt<'a, 'tcx, 'gcx>, did: DefId) -> ReprOptions {
use index::Index;
use schema::*;
-use rustc::dep_graph::DepNode;
use rustc::hir;
use rustc::hir::def_id::DefId;
+use rustc::ich::{StableHashingContext, Fingerprint};
+use rustc::middle::cstore::EncodedMetadataHash;
use rustc::ty::TyCtxt;
use syntax::ast;
use std::ops::{Deref, DerefMut};
+use rustc_data_structures::accumulate_vec::AccumulateVec;
+use rustc_data_structures::stable_hasher::{StableHasher, HashStable};
+use rustc_serialize::Encodable;
+
/// Builder that can encode new items, adding them into the index.
/// Item encoding cannot be nested.
pub struct IndexBuilder<'a, 'b: 'a, 'tcx: 'b> {
/// holds, and that it is therefore not gaining "secret" access to
/// bits of HIR or other state that would not be trackd by the
/// content system.
- pub fn record<DATA>(&mut self,
- id: DefId,
- op: fn(&mut EncodeContext<'b, 'tcx>, DATA) -> Entry<'tcx>,
- data: DATA)
+ pub fn record<'x, DATA>(&'x mut self,
+ id: DefId,
+ op: fn(&mut EntryBuilder<'x, 'b, 'tcx>, DATA) -> Entry<'tcx>,
+ data: DATA)
where DATA: DepGraphRead
{
- let _task = self.tcx.dep_graph.in_task(DepNode::MetaData(id));
- data.read(self.tcx);
- let entry = op(&mut self.ecx, data);
- self.items.record(id, self.ecx.lazy(&entry));
+ assert!(id.is_local());
+ let tcx: TyCtxt<'b, 'tcx, 'tcx> = self.ecx.tcx;
+
+ // We don't track this since we are explicitly computing the incr. comp.
+ // hashes anyway. In theory we could do some tracking here and use it to
+ // avoid rehashing things (and instead cache the hashes) but it's
+ // unclear whether that would be a win since hashing is cheap enough.
+ let _task = tcx.dep_graph.in_ignore();
+
+ let compute_ich = (tcx.sess.opts.debugging_opts.query_dep_graph ||
+ tcx.sess.opts.debugging_opts.incremental_cc) &&
+ tcx.sess.opts.build_dep_graph();
+
+ let ecx: &'x mut EncodeContext<'b, 'tcx> = &mut *self.ecx;
+ let mut entry_builder = EntryBuilder {
+ tcx: tcx,
+ ecx: ecx,
+ hcx: if compute_ich {
+ Some((StableHashingContext::new(tcx), StableHasher::new()))
+ } else {
+ None
+ }
+ };
+
+ let entry = op(&mut entry_builder, data);
+
+ if let Some((ref mut hcx, ref mut hasher)) = entry_builder.hcx {
+ entry.hash_stable(hcx, hasher);
+ }
+
+ let entry = entry_builder.ecx.lazy(&entry);
+ entry_builder.finish(id);
+ self.items.record(id, entry);
}
pub fn into_items(self) -> Index {
tcx.hir.read(self.0);
}
}
+
+pub struct EntryBuilder<'a, 'b: 'a, 'tcx: 'b> {
+ pub tcx: TyCtxt<'b, 'tcx, 'tcx>,
+ ecx: &'a mut EncodeContext<'b, 'tcx>,
+ hcx: Option<(StableHashingContext<'b, 'tcx>, StableHasher<Fingerprint>)>,
+}
+
+impl<'a, 'b: 'a, 'tcx: 'b> EntryBuilder<'a, 'b, 'tcx> {
+
+ pub fn finish(self, def_id: DefId) {
+ if let Some((_, hasher)) = self.hcx {
+ let hash = hasher.finish();
+ self.ecx.metadata_hashes.push(EncodedMetadataHash {
+ def_index: def_id.index,
+ hash: hash,
+ });
+ }
+ }
+
+ pub fn lazy<T>(&mut self, value: &T) -> Lazy<T>
+ where T: Encodable + HashStable<StableHashingContext<'b, 'tcx>>
+ {
+ if let Some((ref mut hcx, ref mut hasher)) = self.hcx {
+ value.hash_stable(hcx, hasher);
+ debug!("metadata-hash: {:?}", hasher);
+ }
+ self.ecx.lazy(value)
+ }
+
+ pub fn lazy_seq<I, T>(&mut self, iter: I) -> LazySeq<T>
+ where I: IntoIterator<Item = T>,
+ T: Encodable + HashStable<StableHashingContext<'b, 'tcx>>
+ {
+ if let Some((ref mut hcx, ref mut hasher)) = self.hcx {
+ let iter = iter.into_iter();
+ let (lower_bound, upper_bound) = iter.size_hint();
+
+ if upper_bound == Some(lower_bound) {
+ lower_bound.hash_stable(hcx, hasher);
+ let mut num_items_hashed = 0;
+ let ret = self.ecx.lazy_seq(iter.inspect(|item| {
+ item.hash_stable(hcx, hasher);
+ num_items_hashed += 1;
+ }));
+
+ // Sometimes items in a sequence are filtered out without being
+ // hashed (e.g. for &[ast::Attribute]) and this code path cannot
+ // handle that correctly, so we want to make sure we didn't hit
+ // it by accident.
+ if lower_bound != num_items_hashed {
+ bug!("Hashed a different number of items ({}) than expected ({})",
+ num_items_hashed,
+ lower_bound);
+ }
+ debug!("metadata-hash: {:?}", hasher);
+ ret
+ } else {
+ // Collect into a vec so we know the length of the sequence
+ let items: AccumulateVec<[T; 32]> = iter.collect();
+ items.hash_stable(hcx, hasher);
+ debug!("metadata-hash: {:?}", hasher);
+ self.ecx.lazy_seq(items)
+ }
+ } else {
+ self.ecx.lazy_seq(iter)
+ }
+ }
+
+ pub fn lazy_seq_from_slice<T>(&mut self, slice: &[T]) -> LazySeq<T>
+ where T: Encodable + HashStable<StableHashingContext<'b, 'tcx>>
+ {
+ if let Some((ref mut hcx, ref mut hasher)) = self.hcx {
+ slice.hash_stable(hcx, hasher);
+ debug!("metadata-hash: {:?}", hasher);
+ }
+ self.ecx.lazy_seq_ref(slice.iter())
+ }
+
+ pub fn lazy_seq_ref_from_slice<T>(&mut self, slice: &[&T]) -> LazySeq<T>
+ where T: Encodable + HashStable<StableHashingContext<'b, 'tcx>>
+ {
+ if let Some((ref mut hcx, ref mut hasher)) = self.hcx {
+ slice.hash_stable(hcx, hasher);
+ debug!("metadata-hash: {:?}", hasher);
+ }
+ self.ecx.lazy_seq_ref(slice.iter().map(|x| *x))
+ }
+}
#![feature(rustc_private)]
#![feature(specialization)]
#![feature(staged_api)]
+#![feature(discriminant_value)]
#[macro_use]
extern crate log;
use rustc::hir;
use rustc::hir::def::{self, CtorKind};
use rustc::hir::def_id::{DefIndex, DefId};
+use rustc::ich::StableHashingContext;
use rustc::middle::const_val::ConstVal;
use rustc::middle::cstore::{DepKind, LinkagePreference, NativeLibrary};
use rustc::middle::lang_items;
use syntax_pos::{self, Span};
use std::marker::PhantomData;
+use std::mem;
+
+use rustc_data_structures::stable_hasher::{StableHasher, HashStable,
+ StableHasherResult};
pub fn rustc_version() -> String {
format!("rustc {}",
impl<T> serialize::UseSpecializedEncodable for Lazy<T> {}
impl<T> serialize::UseSpecializedDecodable for Lazy<T> {}
+impl<CTX, T> HashStable<CTX> for Lazy<T> {
+ fn hash_stable<W: StableHasherResult>(&self,
+ _: &mut CTX,
+ _: &mut StableHasher<W>) {
+ // There's nothing to do. Whatever got encoded within this Lazy<>
+ // wrapper has already been hashed.
+ }
+}
+
/// A sequence of type T referred to by its absolute position
/// in the metadata and length, and which can be decoded lazily.
/// The sequence is a single node for the purposes of `Lazy`.
impl<T> serialize::UseSpecializedEncodable for LazySeq<T> {}
impl<T> serialize::UseSpecializedDecodable for LazySeq<T> {}
+impl<CTX, T> HashStable<CTX> for LazySeq<T> {
+ fn hash_stable<W: StableHasherResult>(&self,
+ _: &mut CTX,
+ _: &mut StableHasher<W>) {
+ // There's nothing to do. Whatever got encoded within this Lazy<>
+ // wrapper has already been hashed.
+ }
+}
+
/// Encoding / decoding state for `Lazy` and `LazySeq`.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum LazyState {
pub mir: Option<Lazy<mir::Mir<'tcx>>>,
}
+impl_stable_hash_for!(struct Entry<'tcx> {
+ kind,
+ visibility,
+ span,
+ attributes,
+ children,
+ stability,
+ deprecation,
+ ty,
+ inherent_impls,
+ variances,
+ generics,
+ predicates,
+ ast,
+ mir
+});
+
#[derive(Copy, Clone, RustcEncodable, RustcDecodable)]
pub enum EntryKind<'tcx> {
Const(u8),
ForeignImmStatic,
ForeignMutStatic,
ForeignMod,
+ GlobalAsm,
Type,
Enum(ReprOptions),
Field,
AssociatedConst(AssociatedContainer, u8),
}
+impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for EntryKind<'tcx> {
+ fn hash_stable<W: StableHasherResult>(&self,
+ hcx: &mut StableHashingContext<'a, 'tcx>,
+ hasher: &mut StableHasher<W>) {
+ mem::discriminant(self).hash_stable(hcx, hasher);
+ match *self {
+ EntryKind::ImmStatic |
+ EntryKind::MutStatic |
+ EntryKind::ForeignImmStatic |
+ EntryKind::ForeignMutStatic |
+ EntryKind::ForeignMod |
+ EntryKind::GlobalAsm |
+ EntryKind::Field |
+ EntryKind::Type => {
+ // Nothing else to hash here.
+ }
+ EntryKind::Const(qualif) => {
+ qualif.hash_stable(hcx, hasher);
+ }
+ EntryKind::Enum(ref repr_options) => {
+ repr_options.hash_stable(hcx, hasher);
+ }
+ EntryKind::Variant(ref variant_data) => {
+ variant_data.hash_stable(hcx, hasher);
+ }
+ EntryKind::Struct(ref variant_data, ref repr_options) |
+ EntryKind::Union(ref variant_data, ref repr_options) => {
+ variant_data.hash_stable(hcx, hasher);
+ repr_options.hash_stable(hcx, hasher);
+ }
+ EntryKind::Fn(ref fn_data) |
+ EntryKind::ForeignFn(ref fn_data) => {
+ fn_data.hash_stable(hcx, hasher);
+ }
+ EntryKind::Mod(ref mod_data) => {
+ mod_data.hash_stable(hcx, hasher);
+ }
+ EntryKind::MacroDef(ref macro_def) => {
+ macro_def.hash_stable(hcx, hasher);
+ }
+ EntryKind::Closure(closure_data) => {
+ closure_data.hash_stable(hcx, hasher);
+ }
+ EntryKind::Trait(ref trait_data) => {
+ trait_data.hash_stable(hcx, hasher);
+ }
+ EntryKind::DefaultImpl(ref impl_data) |
+ EntryKind::Impl(ref impl_data) => {
+ impl_data.hash_stable(hcx, hasher);
+ }
+ EntryKind::Method(ref method_data) => {
+ method_data.hash_stable(hcx, hasher);
+ }
+ EntryKind::AssociatedType(associated_container) => {
+ associated_container.hash_stable(hcx, hasher);
+ }
+ EntryKind::AssociatedConst(associated_container, qualif) => {
+ associated_container.hash_stable(hcx, hasher);
+ qualif.hash_stable(hcx, hasher);
+ }
+ }
+ }
+}
+
#[derive(RustcEncodable, RustcDecodable)]
pub struct ModData {
pub reexports: LazySeq<def::Export>,
}
+impl_stable_hash_for!(struct ModData { reexports });
+
#[derive(RustcEncodable, RustcDecodable)]
pub struct MacroDef {
pub body: String,
}
+impl_stable_hash_for!(struct MacroDef { body });
+
#[derive(RustcEncodable, RustcDecodable)]
pub struct FnData {
pub constness: hir::Constness,
pub arg_names: LazySeq<ast::Name>,
}
+impl_stable_hash_for!(struct FnData { constness, arg_names });
+
#[derive(RustcEncodable, RustcDecodable)]
pub struct VariantData<'tcx> {
pub ctor_kind: CtorKind,
pub struct_ctor: Option<DefIndex>,
}
+impl_stable_hash_for!(struct VariantData<'tcx> {
+ ctor_kind,
+ discr,
+ evaluated_discr,
+ struct_ctor
+});
+
#[derive(RustcEncodable, RustcDecodable)]
pub struct TraitData<'tcx> {
pub unsafety: hir::Unsafety,
pub super_predicates: Lazy<ty::GenericPredicates<'tcx>>,
}
+impl_stable_hash_for!(struct TraitData<'tcx> {
+ unsafety,
+ paren_sugar,
+ has_default_impl,
+ super_predicates
+});
+
#[derive(RustcEncodable, RustcDecodable)]
pub struct ImplData<'tcx> {
pub polarity: hir::ImplPolarity,
pub trait_ref: Option<Lazy<ty::TraitRef<'tcx>>>,
}
+impl_stable_hash_for!(struct ImplData<'tcx> {
+ polarity,
+ parent_impl,
+ coerce_unsized_info,
+ trait_ref
+});
+
+
/// Describes whether the container of an associated item
/// is a trait or an impl and whether, in a trait, it has
/// a default, or an in impl, whether it's marked "default".
ImplFinal,
}
+impl_stable_hash_for!(enum ::schema::AssociatedContainer {
+ TraitRequired,
+ TraitWithDefault,
+ ImplDefault,
+ ImplFinal
+});
+
impl AssociatedContainer {
pub fn with_def_id(&self, def_id: DefId) -> ty::AssociatedItemContainer {
match *self {
pub container: AssociatedContainer,
pub has_self: bool,
}
+impl_stable_hash_for!(struct MethodData { fn_data, container, has_self });
#[derive(RustcEncodable, RustcDecodable)]
pub struct ClosureData<'tcx> {
pub kind: ty::ClosureKind,
pub ty: Lazy<ty::PolyFnSig<'tcx>>,
}
+impl_stable_hash_for!(struct ClosureData<'tcx> { kind, ty });
let idx = unpack!(block = this.as_operand(block, None, index));
// bounds check:
- let (len, lt) = (this.temp(usize_ty.clone()), this.temp(bool_ty));
+ let (len, lt) = (this.temp(usize_ty.clone(), expr_span),
+ this.temp(bool_ty, expr_span));
this.cfg.push_assign(block, source_info, // len = len(slice)
&len, Rvalue::Len(slice.clone()));
this.cfg.push_assign(block, source_info, // lt = idx < len
let bool_ty = this.hir.bool_ty();
let minval = this.minval_literal(expr_span, expr.ty);
- let is_min = this.temp(bool_ty);
+ let is_min = this.temp(bool_ty, expr_span);
this.cfg.push_assign(block, source_info, &is_min,
Rvalue::BinaryOp(BinOp::Eq, arg.clone(), minval));
}
ExprKind::Box { value, value_extents } => {
let value = this.hir.mirror(value);
- let result = this.temp(expr.ty);
+ let result = this.temp(expr.ty, expr_span);
// to start, malloc some memory of suitable type (thus far, uninitialized):
this.cfg.push_assign(block, source_info, &result, Rvalue::Box(value.ty));
this.in_scope(value_extents, block, |this| {
let bool_ty = self.hir.bool_ty();
if self.hir.check_overflow() && op.is_checkable() && ty.is_integral() {
let result_tup = self.hir.tcx().intern_tup(&[ty, bool_ty], false);
- let result_value = self.temp(result_tup);
+ let result_value = self.temp(result_tup, span);
self.cfg.push_assign(block, source_info,
&result_value, Rvalue::CheckedBinaryOp(op,
};
// Check for / 0
- let is_zero = self.temp(bool_ty);
+ let is_zero = self.temp(bool_ty, span);
let zero = self.zero_literal(span, ty);
self.cfg.push_assign(block, source_info, &is_zero,
Rvalue::BinaryOp(BinOp::Eq, rhs.clone(), zero));
let neg_1 = self.neg_1_literal(span, ty);
let min = self.minval_literal(span, ty);
- let is_neg_1 = self.temp(bool_ty);
- let is_min = self.temp(bool_ty);
- let of = self.temp(bool_ty);
+ let is_neg_1 = self.temp(bool_ty, span);
+ let is_min = self.temp(bool_ty, span);
+ let of = self.temp(bool_ty, span);
// this does (rhs == -1) & (lhs == MIN). It could short-circuit instead
}
let expr_ty = expr.ty.clone();
- let temp = this.temp(expr_ty.clone());
let expr_span = expr.span;
+ let temp = this.temp(expr_ty.clone(), expr_span);
let source_info = this.source_info(expr_span);
if expr.temp_lifetime_was_shrunk && this.hir.needs_drop(expr_ty) {
}
_ => {
let expr_ty = expr.ty;
- let temp = this.temp(expr.ty.clone());
+ let temp = this.temp(expr.ty.clone(), expr_span);
unpack!(block = this.into(&temp, block, expr));
unpack!(block = this.build_drop(block, expr_span, temp, expr_ty));
block.unit()
mutability: mutability,
ty: var_ty.clone(),
name: Some(name),
- source_info: Some(source_info),
+ source_info: source_info,
+ is_user_variable: true,
});
self.var_indices.insert(var_id, var);
debug!("num_enum_variants: {}, tested variants: {:?}, variants: {:?}",
num_enum_variants, values, variants);
let discr_ty = adt_def.repr.discr_type().to_ty(tcx);
- let discr = self.temp(discr_ty);
+ let discr = self.temp(discr_ty, test.span);
self.cfg.push_assign(block, source_info, &discr,
Rvalue::Discriminant(lvalue.clone()));
assert_eq!(values.len() + 1, targets.len());
if let ty::TyRef(region, mt) = ty.sty {
if let ty::TyArray(_, _) = mt.ty.sty {
ty = tcx.mk_imm_ref(region, tcx.mk_slice(tcx.types.u8));
- let val_slice = self.temp(ty);
+ let val_slice = self.temp(ty, test.span);
self.cfg.push_assign(block, source_info, &val_slice,
Rvalue::Cast(CastKind::Unsize, val, ty));
val = Operand::Consume(val_slice);
value: value.clone()
});
- let slice = self.temp(ty);
+ let slice = self.temp(ty, test.span);
self.cfg.push_assign(block, source_info, &slice,
Rvalue::Cast(CastKind::Unsize, array, ty));
Operand::Consume(slice)
let (mty, method) = self.hir.trait_method(eq_def_id, "eq", ty, &[ty]);
let bool_ty = self.hir.bool_ty();
- let eq_result = self.temp(bool_ty);
+ let eq_result = self.temp(bool_ty, test.span);
let eq_block = self.cfg.start_new_block();
let cleanup = self.diverge_cleanup();
self.cfg.terminate(block, source_info, TerminatorKind::Call {
TestKind::Len { len, op } => {
let (usize_ty, bool_ty) = (self.hir.usize_ty(), self.hir.bool_ty());
- let (actual, result) = (self.temp(usize_ty), self.temp(bool_ty));
+ let (actual, result) = (self.temp(usize_ty, test.span),
+ self.temp(bool_ty, test.span));
// actual = len(lvalue)
self.cfg.push_assign(block, source_info,
left: Operand<'tcx>,
right: Operand<'tcx>) -> BasicBlock {
let bool_ty = self.hir.bool_ty();
- let result = self.temp(bool_ty);
+ let result = self.temp(bool_ty, span);
// result = op(left, right)
let source_info = self.source_info(span);
///
/// NB: **No cleanup is scheduled for this temporary.** You should
/// call `schedule_drop` once the temporary is initialized.
- pub fn temp(&mut self, ty: Ty<'tcx>) -> Lvalue<'tcx> {
- let temp = self.local_decls.push(LocalDecl::new_temp(ty));
+ pub fn temp(&mut self, ty: Ty<'tcx>, span: Span) -> Lvalue<'tcx> {
+ let temp = self.local_decls.push(LocalDecl::new_temp(ty, span));
let lvalue = Lvalue::Local(temp);
debug!("temp: created temp {:?} with type {:?}",
lvalue, self.local_decls[temp].ty);
value: u64)
-> Lvalue<'tcx> {
let usize_ty = self.hir.usize_ty();
- let temp = self.temp(usize_ty);
+ let temp = self.temp(usize_ty, source_info.span);
self.cfg.push_assign_constant(
block, source_info, &temp,
Constant {
visibility_scopes: IndexVec::new(),
visibility_scope: ARGUMENT_VISIBILITY_SCOPE,
breakable_scopes: vec![],
- local_decls: IndexVec::from_elem_n(LocalDecl::new_return_pointer(return_ty), 1),
+ local_decls: IndexVec::from_elem_n(LocalDecl::new_return_pointer(return_ty,
+ span), 1),
var_indices: NodeMap(),
unit_temp: None,
cached_resume_block: None,
self.local_decls.push(LocalDecl {
mutability: Mutability::Not,
ty: ty,
- source_info: None,
+ source_info: SourceInfo {
+ scope: ARGUMENT_VISIBILITY_SCOPE,
+ span: pattern.map_or(self.fn_span, |pat| pat.span)
+ },
name: name,
+ is_user_variable: false,
});
}
Some(ref tmp) => tmp.clone(),
None => {
let ty = self.hir.unit_ty();
- let tmp = self.temp(ty);
+ let fn_span = self.fn_span;
+ let tmp = self.temp(ty, fn_span);
self.unit_temp = Some(tmp.clone());
tmp
}
```
"##,
+E0161: r##"
+A value was moved. However, its size was not known at compile time, and only
+values of a known size can be moved.
+
+Erroneous code example:
+
+```compile_fail
+#![feature(box_syntax)]
+
+fn main() {
+ let array: &[isize] = &[1, 2, 3];
+ let _x: Box<[isize]> = box *array;
+ // error: cannot move a value of type [isize]: the size of [isize] cannot
+ // be statically determined
+}
+```
+
+In Rust, you can only move a value when its size is known at compile time.
+
+To work around this restriction, consider "hiding" the value behind a reference:
+either `&x` or `&mut x`. Since a reference has a fixed size, this lets you move
+it around as usual. Example:
+
+```
+#![feature(box_syntax)]
+
+fn main() {
+ let array: &[isize] = &[1, 2, 3];
+ let _x: Box<&[isize]> = box array; // ok!
+}
+```
+"##,
+
E0396: r##"
The value behind a raw pointer can't be determined at compile-time
(or even link-time), which means it can't be used in a constant
Direct(DefId),
}
-fn temp_decl(mutability: Mutability, ty: Ty) -> LocalDecl {
- LocalDecl { mutability, ty, name: None, source_info: None }
+fn temp_decl(mutability: Mutability, ty: Ty, span: Span) -> LocalDecl {
+ LocalDecl {
+ mutability, ty, name: None,
+ source_info: SourceInfo { scope: ARGUMENT_VISIBILITY_SCOPE, span },
+ is_user_variable: false
+ }
}
-fn local_decls_for_sig<'tcx>(sig: &ty::FnSig<'tcx>)
+fn local_decls_for_sig<'tcx>(sig: &ty::FnSig<'tcx>, span: Span)
-> IndexVec<Local, LocalDecl<'tcx>>
{
- iter::once(temp_decl(Mutability::Mut, sig.output()))
+ iter::once(temp_decl(Mutability::Mut, sig.output(), span))
.chain(sig.inputs().iter().map(
- |ity| temp_decl(Mutability::Not, ity)))
+ |ity| temp_decl(Mutability::Not, ity, span)))
.collect()
}
),
IndexVec::new(),
sig.output(),
- local_decls_for_sig(&sig),
+ local_decls_for_sig(&sig, span),
sig.inputs().len(),
vec![],
span
debug!("build_call_shim: sig={:?}", sig);
- let mut local_decls = local_decls_for_sig(&sig);
+ let mut local_decls = local_decls_for_sig(&sig, span);
let source_info = SourceInfo { span, scope: ARGUMENT_VISIBILITY_SCOPE };
let rcvr_arg = Local::new(1+0);
tcx.mk_ref(re_erased, ty::TypeAndMut {
ty: sig.inputs()[0],
mutbl: hir::Mutability::MutMutable
- })
+ }),
+ span
));
statements.push(Statement {
source_info: source_info,
debug!("build_ctor: def_id={:?} sig={:?} fields={:?}", def_id, sig, fields);
- let local_decls = local_decls_for_sig(&sig);
+ let local_decls = local_decls_for_sig(&sig, span);
let source_info = SourceInfo {
span: span,
for loc in callee_mir.vars_and_temps_iter() {
let mut local = callee_mir.local_decls[loc].clone();
- if let Some(ref mut source_info) = local.source_info {
- source_info.scope = scope_map[source_info.scope];
-
- source_info.span = callsite.location.span;
- }
+ local.source_info.scope = scope_map[local.source_info.scope];
+ local.source_info.span = callsite.location.span;
let idx = caller_mir.local_decls.push(local);
local_map.push(idx);
let ty = dest.ty(caller_mir, self.tcx);
- let temp = LocalDecl::new_temp(ty);
+ let temp = LocalDecl::new_temp(ty, callsite.location.span);
let tmp = caller_mir.local_decls.push(temp);
let tmp = Lvalue::Local(tmp);
arg.deref());
let ty = arg.ty(caller_mir, self.tcx);
- let ref_tmp = LocalDecl::new_temp(ty);
+ let ref_tmp = LocalDecl::new_temp(ty, callsite.location.span);
let ref_tmp = caller_mir.local_decls.push(ref_tmp);
let ref_tmp = Lvalue::Local(ref_tmp);
let raw_ptr = Rvalue::Cast(CastKind::Misc, Operand::Consume(ref_tmp), ptr_ty);
- let cast_tmp = LocalDecl::new_temp(ptr_ty);
+ let cast_tmp = LocalDecl::new_temp(ptr_ty, callsite.location.span);
let cast_tmp = caller_mir.local_decls.push(cast_tmp);
let cast_tmp = Lvalue::Local(cast_tmp);
let ty = arg.ty(caller_mir, tcx);
- let arg_tmp = LocalDecl::new_temp(ty);
+ let arg_tmp = LocalDecl::new_temp(ty, callsite.location.span);
let arg_tmp = caller_mir.local_decls.push(arg_tmp);
let arg_tmp = Lvalue::Local(arg_tmp);
let no_stmts = self.source[loc.block].statements.len();
let new_temp = self.promoted.local_decls.push(
- LocalDecl::new_temp(self.source.local_decls[temp].ty));
+ LocalDecl::new_temp(self.source.local_decls[temp].ty,
+ self.source.local_decls[temp].source_info.span));
debug!("promote({:?} @ {:?}/{:?}, {:?})",
temp, loc, no_stmts, self.keep_original);
};
// Declare return pointer local
- let initial_locals = iter::once(LocalDecl::new_return_pointer(ty)).collect();
+ let initial_locals = iter::once(LocalDecl::new_return_pointer(ty, span))
+ .collect();
let mut promoter = Promoter {
promoted: Mir::new(
// Avoid a generic error for other uses of arguments.
if self.qualif.intersects(Qualif::FN_ARGUMENT) {
let decl = &self.mir.local_decls[index];
- span_err!(self.tcx.sess, decl.source_info.unwrap().span, E0022,
+ span_err!(self.tcx.sess, decl.source_info.span, E0022,
"arguments of constant functions can only \
be immutable by-value bindings");
return;
use syntax::ast;
use syntax_pos::{Span, DUMMY_SP};
+use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::indexed_vec::Idx;
fn mirbug(tcx: TyCtxt, span: Span, msg: &str) {
self.sanitize_type(rvalue, rval_ty);
}
+ fn visit_local_decl(&mut self, local_decl: &LocalDecl<'tcx>) {
+ self.super_local_decl(local_decl);
+ self.sanitize_type(local_decl, local_decl.ty);
+ }
+
fn visit_mir(&mut self, mir: &Mir<'tcx>) {
self.sanitize_type(&"return type", mir.return_ty);
for local_decl in &mir.local_decls {
fulfillment_cx: traits::FulfillmentContext<'tcx>,
last_span: Span,
body_id: ast::NodeId,
+ reported_errors: FxHashSet<(Ty<'tcx>, Span)>,
}
impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> {
fulfillment_cx: traits::FulfillmentContext::new(),
last_span: DUMMY_SP,
body_id: body_id,
+ reported_errors: FxHashSet(),
}
}
}
}
- fn typeck_mir(&mut self, mir: &Mir<'tcx>) {
+ fn check_local(&mut self, mir: &Mir<'gcx>, local: Local, local_decl: &LocalDecl<'gcx>) {
+ match mir.local_kind(local) {
+ LocalKind::ReturnPointer | LocalKind::Arg => {
+ // return values of normal functions are required to be
+ // sized by typeck, but return values of ADT constructors are
+ // not because we don't include a `Self: Sized` bounds on them.
+ //
+ // Unbound parts of arguments were never required to be Sized
+ // - maybe we should make that a warning.
+ return
+ }
+ LocalKind::Var | LocalKind::Temp => {}
+ }
+
+ let span = local_decl.source_info.span;
+ let ty = local_decl.ty;
+ if !ty.is_sized(self.tcx().global_tcx(), self.infcx.param_env(), span) {
+ // in current MIR construction, all non-control-flow rvalue
+ // expressions evaluate through `as_temp` or `into` a return
+ // slot or local, so to find all unsized rvalues it is enough
+ // to check all temps, return slots and locals.
+ if let None = self.reported_errors.replace((ty, span)) {
+ span_err!(self.tcx().sess, span, E0161,
+ "cannot move a value of type {0}: the size of {0} \
+ cannot be statically determined", ty);
+ }
+ }
+ }
+
+ fn typeck_mir(&mut self, mir: &Mir<'gcx>) {
self.last_span = mir.span;
debug!("run_on_mir: {:?}", mir.span);
+
+ for (local, local_decl) in mir.local_decls.iter_enumerated() {
+ self.check_local(mir, local, local_decl);
+ }
+
for block in mir.basic_blocks() {
for stmt in &block.statements {
if stmt.source_info.span != DUMMY_SP {
impl<'tcx> MirPass<'tcx> for TypeckMir {
fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
src: MirSource, mir: &mut Mir<'tcx>) {
- debug!("run_pass: {}", tcx.node_path_str(src.item_id()));
+ let item_id = src.item_id();
+ let def_id = tcx.hir.local_def_id(item_id);
+ debug!("run_pass: {}", tcx.item_path_str(def_id));
if tcx.sess.err_count() > 0 {
// compiling a broken program can obviously result in a
// broken MIR, so try not to report duplicate errors.
return;
}
- let param_env = ty::ParameterEnvironment::for_item(tcx, src.item_id());
+ let param_env = ty::ParameterEnvironment::for_item(tcx, item_id);
tcx.infer_ctxt(param_env, Reveal::UserFacing).enter(|infcx| {
- let mut checker = TypeChecker::new(&infcx, src.item_id());
+ let mut checker = TypeChecker::new(&infcx, item_id);
{
let mut verifier = TypeVerifier::new(&mut checker, mir);
verifier.visit_mir(mir);
}
fn new_temp(&mut self, ty: Ty<'tcx>) -> Local {
- self.elaborator.patch().new_temp(ty)
+ self.elaborator.patch().new_temp(ty, self.source_info.span)
}
fn terminator_loc(&mut self, bb: BasicBlock) -> Location {
use rustc::ty::Ty;
use rustc::mir::*;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
+use syntax_pos::Span;
/// This struct represents a patch to MIR, which can add
/// new statements and basic blocks and patch over block
}
}
- pub fn new_temp(&mut self, ty: Ty<'tcx>) -> Local {
+ pub fn new_temp(&mut self, ty: Ty<'tcx>, span: Span) -> Local {
let index = self.next_local;
self.next_local += 1;
- self.new_locals.push(LocalDecl::new_temp(ty));
+ self.new_locals.push(LocalDecl::new_temp(ty, span));
Local::new(index as usize)
}
// User variable types (including the user's name in a comment).
for local in mir.vars_iter() {
let var = &mir.local_decls[local];
- let (name, source_info) = if var.source_info.unwrap().scope == child {
- (var.name.unwrap(), var.source_info.unwrap())
+ let (name, source_info) = if var.source_info.scope == child {
+ (var.name.unwrap(), var.source_info)
} else {
// Not a variable or not declared in this scope.
continue;
```
"##,
-E0161: r##"
-A value was moved. However, its size was not known at compile time, and only
-values of a known size can be moved.
-
-Erroneous code example:
-
-```compile_fail
-#![feature(box_syntax)]
-
-fn main() {
- let array: &[isize] = &[1, 2, 3];
- let _x: Box<[isize]> = box *array;
- // error: cannot move a value of type [isize]: the size of [isize] cannot
- // be statically determined
-}
-```
-
-In Rust, you can only move a value when its size is known at compile time.
-
-To work around this restriction, consider "hiding" the value behind a reference:
-either `&x` or `&mut x`. Since a reference has a fixed size, this lets you move
-it around as usual. Example:
-
-```
-#![feature(box_syntax)]
-
-fn main() {
- let array: &[isize] = &[1, 2, 3];
- let _x: Box<&[isize]> = box array; // ok!
-}
-```
-"##,
-
E0265: r##"
This error indicates that a static or constant references itself.
All statics and constants need to resolve to a value in an acyclic manner.
pub mod loops;
pub mod mir_stats;
pub mod no_asm;
-pub mod rvalues;
pub mod static_recursion;
use rustc::session::Session;
-use rustc::dep_graph::DepNode;
use rustc::hir::map::Map;
use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap};
use rustc::hir;
}
pub fn check_crate(sess: &Session, map: &Map) {
- let _task = map.dep_graph.in_task(DepNode::CheckLoops);
let krate = map.krate();
krate.visit_all_item_likes(&mut CheckLoopVisitor {
sess: sess,
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Checks that all rvalues in a crate have statically known size. check_crate
-// is the public starting point.
-
-use rustc::dep_graph::DepNode;
-use rustc::middle::expr_use_visitor as euv;
-use rustc::middle::mem_categorization as mc;
-use rustc::ty::{self, TyCtxt};
-use rustc::traits::Reveal;
-
-use rustc::hir;
-use rustc::hir::intravisit::{Visitor, NestedVisitorMap};
-use syntax::ast;
-use syntax_pos::Span;
-
-pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
- let mut rvcx = RvalueContext { tcx: tcx };
- tcx.visit_all_item_likes_in_krate(DepNode::RvalueCheck, &mut rvcx.as_deep_visitor());
-}
-
-struct RvalueContext<'a, 'tcx: 'a> {
- tcx: TyCtxt<'a, 'tcx, 'tcx>,
-}
-
-impl<'a, 'tcx> Visitor<'tcx> for RvalueContext<'a, 'tcx> {
- fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
- NestedVisitorMap::None
- }
-
- fn visit_nested_body(&mut self, body_id: hir::BodyId) {
- let body = self.tcx.hir.body(body_id);
- self.tcx.infer_ctxt(body_id, Reveal::UserFacing).enter(|infcx| {
- let mut delegate = RvalueContextDelegate {
- tcx: infcx.tcx,
- param_env: &infcx.parameter_environment
- };
- euv::ExprUseVisitor::new(&mut delegate, &infcx).consume_body(body);
- });
- self.visit_body(body);
- }
-}
-
-struct RvalueContextDelegate<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
- tcx: TyCtxt<'a, 'gcx, 'tcx>,
- param_env: &'a ty::ParameterEnvironment<'gcx>,
-}
-
-impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for RvalueContextDelegate<'a, 'gcx, 'tcx> {
- fn consume(&mut self,
- _: ast::NodeId,
- span: Span,
- cmt: mc::cmt<'tcx>,
- _: euv::ConsumeMode) {
- debug!("consume; cmt: {:?}; type: {:?}", *cmt, cmt.ty);
- let ty = self.tcx.lift_to_global(&cmt.ty).unwrap();
- if !ty.is_sized(self.tcx.global_tcx(), self.param_env, span) {
- span_err!(self.tcx.sess, span, E0161,
- "cannot move a value of type {0}: the size of {0} cannot be statically determined",
- ty);
- }
- }
-
- fn matched_pat(&mut self,
- _matched_pat: &hir::Pat,
- _cmt: mc::cmt,
- _mode: euv::MatchMode) {}
-
- fn consume_pat(&mut self,
- _consume_pat: &hir::Pat,
- _cmt: mc::cmt,
- _mode: euv::ConsumeMode) {
- }
-
- fn borrow(&mut self,
- _borrow_id: ast::NodeId,
- _borrow_span: Span,
- _cmt: mc::cmt,
- _loan_region: &'tcx ty::Region,
- _bk: ty::BorrowKind,
- _loan_cause: euv::LoanCause) {
- }
-
- fn decl_without_init(&mut self,
- _id: ast::NodeId,
- _span: Span) {
- }
-
- fn mutate(&mut self,
- _assignment_id: ast::NodeId,
- _assignment_span: Span,
- _assignee_cmt: mc::cmt,
- _mode: euv::MutateMode) {
- }
-}
// This compiler pass detects constants that refer to themselves
// recursively.
-use rustc::dep_graph::DepNode;
use rustc::hir::map as hir_map;
use rustc::session::{CompileResult, Session};
use rustc::hir::def::{Def, CtorKind};
}
pub fn check_crate<'hir>(sess: &Session, hir_map: &hir_map::Map<'hir>) -> CompileResult {
- let _task = hir_map.dep_graph.in_task(DepNode::CheckStaticRecursion);
-
let mut visitor = CheckCrateVisitor {
sess: sess,
hir_map: hir_map,
fn load_plugin(&mut self, span: Span, name: &str, args: Vec<ast::NestedMetaItem>) {
let registrar = self.reader.find_plugin_registrar(span, name);
- if let Some((lib, svh, index)) = registrar {
- let symbol = self.sess.generate_plugin_registrar_symbol(&svh, index);
+ if let Some((lib, disambiguator, index)) = registrar {
+ let symbol = self.sess.generate_plugin_registrar_symbol(disambiguator, index);
let fun = self.dylink_registrar(span, lib, symbol);
self.plugins.push(PluginRegistrar {
fun: fun,
self.prev_level
}
// Other `pub` items inherit levels from parents
- _ => {
+ hir::ItemConst(..) | hir::ItemEnum(..) | hir::ItemExternCrate(..) |
+ hir::ItemGlobalAsm(..) | hir::ItemFn(..) | hir::ItemMod(..) |
+ hir::ItemStatic(..) | hir::ItemStruct(..) | hir::ItemTrait(..) |
+ hir::ItemTy(..) | hir::ItemUnion(..) | hir::ItemUse(..) => {
if item.vis == hir::Public { self.prev_level } else { None }
}
};
}
}
}
- _ => {}
+ hir::ItemUse(..) | hir::ItemStatic(..) | hir::ItemConst(..) |
+ hir::ItemGlobalAsm(..) | hir::ItemTy(..) | hir::ItemMod(..) |
+ hir::ItemFn(..) | hir::ItemExternCrate(..) | hir::ItemDefaultImpl(..) => {}
}
// Mark all items in interfaces of reachable items as reachable
hir::ItemUse(..) => {}
// The interface is empty
hir::ItemDefaultImpl(..) => {}
+ // The interface is empty
+ hir::ItemGlobalAsm(..) => {}
// Visit everything
hir::ItemConst(..) | hir::ItemStatic(..) |
hir::ItemFn(..) | hir::ItemTy(..) => {
hir::ItemMod(..) => {}
// Checked in resolve
hir::ItemUse(..) => {}
+ // No subitems
+ hir::ItemGlobalAsm(..) => {}
// Subitems of these items have inherited publicity
hir::ItemConst(..) | hir::ItemStatic(..) | hir::ItemFn(..) |
hir::ItemTy(..) => {
self.define(parent, ident, TypeNS, imported_binding);
}
+ ItemKind::GlobalAsm(..) => {}
+
ItemKind::Mod(..) if item.ident == keywords::Invalid.ident() => {} // Crate root
ItemKind::Mod(..) => {
LoadedMacro::ProcMacro(ext) => return ext,
};
- let ext = Rc::new(macro_rules::compile(&self.session.parse_sess, ¯o_def));
+ let ext = Rc::new(macro_rules::compile(&self.session.parse_sess,
+ &self.session.features,
+ ¯o_def));
self.macro_map.insert(def_id, ext.clone());
ext
}
}
}
- ItemKind::ExternCrate(_) | ItemKind::MacroDef(..) => {
+ ItemKind::ExternCrate(_) | ItemKind::MacroDef(..) | ItemKind::GlobalAsm(_)=> {
// do nothing, these are just around to be encoded
}
}
let def_id = self.definitions.local_def_id(item.id);
- let ext = Rc::new(macro_rules::compile(&self.session.parse_sess, item));
+ let ext = Rc::new(macro_rules::compile(&self.session.parse_sess,
+ &self.session.features,
+ item));
self.macro_map.insert(def_id, ext);
*legacy_scope = LegacyScope::Binding(self.arenas.alloc_legacy_binding(LegacyBinding {
parent: Cell::new(*legacy_scope), name: ident.name, def_id: def_id, span: item.span,
Def::AssociatedTy(..) |
Def::AssociatedConst(..) |
Def::PrimTy(_) |
+ Def::GlobalAsm(_) |
Def::Err => {
span_bug!(span,
"process_def_kind for unexpected item: {:?}",
Def::SelfTy(..) |
Def::Label(..) |
Def::Macro(..) |
+ Def::GlobalAsm(..) |
Def::Err => None,
}
}
Layout::Vector { .. } => {
Some(Reg {
- kind: RegKind::Integer,
+ kind: RegKind::Vector,
size: self.size(ccx)
})
}
llvm::LLVMMDNodeInContext(bcx.ccx.llcx(), &val, 1));
}
}
+
+pub fn trans_global_asm<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+ ga: &hir::GlobalAsm) {
+ let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap();
+ unsafe {
+ llvm::LLVMRustAppendModuleInlineAsm(ccx.llmod(), asm.as_ptr());
+ }
+}
use flate;
use syntax::ast;
use syntax::attr;
-use syntax::symbol::Symbol;
use syntax_pos::Span;
/// The LLVM module name containing crate-metadata. This includes a `.` on
"rust_out".to_string()
}
-pub fn build_link_meta(incremental_hashes_map: &IncrementalHashesMap,
- name: &str)
- -> LinkMeta {
+pub fn build_link_meta(incremental_hashes_map: &IncrementalHashesMap) -> LinkMeta {
let r = LinkMeta {
- crate_name: Symbol::intern(name),
crate_hash: Svh::new(incremental_hashes_map[&DepNode::Krate].to_smaller_hash()),
};
info!("{:?}", r);
}
fn emit_metadata<'a>(sess: &'a Session, trans: &CrateTranslation, out_filename: &Path) {
- let result = fs::File::create(out_filename).and_then(|mut f| f.write_all(&trans.metadata));
+ let result = fs::File::create(out_filename).and_then(|mut f| {
+ f.write_all(&trans.metadata.raw_data)
+ });
+
if let Err(e) = result {
sess.fatal(&format!("failed to write {}: {}", out_filename.display(), e));
}
use util::nodemap::FxHashMap;
use rustc::hir::def_id::{DefId, CrateNum, LOCAL_CRATE};
use rustc::session::config;
+use rustc::ty::TyCtxt;
use syntax::attr;
use trans_item::TransItem;
}
if let Some(id) = scx.sess().derive_registrar_fn.get() {
- let svh = &scx.link_meta().crate_hash;
let def_id = scx.tcx().hir.local_def_id(id);
let idx = def_id.index;
- let registrar = scx.sess().generate_derive_registrar_symbol(svh, idx);
+ let disambiguator = scx.sess().local_crate_disambiguator();
+ let registrar = scx.sess().generate_derive_registrar_symbol(disambiguator, idx);
local_crate.push((registrar, SymbolExportLevel::C));
}
if scx.sess().crate_types.borrow().contains(&config::CrateTypeDylib) {
- local_crate.push((scx.metadata_symbol_name(),
+ local_crate.push((metadata_symbol_name(scx.tcx()),
SymbolExportLevel::Rust));
}
}
}
+pub fn metadata_symbol_name(tcx: TyCtxt) -> String {
+ format!("rust_metadata_{}_{}",
+ tcx.crate_name(LOCAL_CRATE),
+ tcx.crate_disambiguator(LOCAL_CRATE))
+}
+
pub fn crate_export_threshold(crate_type: config::CrateType)
-> SymbolExportLevel {
match crate_type {
if let Some(id) = node_id {
if scx.sess().plugin_registrar_fn.get() == Some(id) {
- let svh = &scx.link_meta().crate_hash;
let idx = def_id.index;
- return scx.sess().generate_plugin_registrar_symbol(svh, idx);
+ let disambiguator = scx.sess().local_crate_disambiguator();
+ return scx.sess().generate_plugin_registrar_symbol(disambiguator, idx);
}
if scx.sess().derive_registrar_fn.get() == Some(id) {
- let svh = &scx.link_meta().crate_hash;
let idx = def_id.index;
- return scx.sess().generate_derive_registrar_symbol(svh, idx);
+ let disambiguator = scx.sess().local_crate_disambiguator();
+ return scx.sess().generate_derive_registrar_symbol(disambiguator, idx);
}
}
use back::link;
use back::linker::LinkerInfo;
use back::symbol_export::{self, ExportedSymbols};
-use llvm::{Linkage, ValueRef, Vector, get_param};
+use llvm::{ContextRef, Linkage, ModuleRef, ValueRef, Vector, get_param};
use llvm;
use rustc::hir::def_id::LOCAL_CRATE;
use middle::lang_items::StartFnLangItem;
+use middle::cstore::EncodedMetadata;
use rustc::ty::{self, Ty, TyCtxt};
-use rustc::dep_graph::{AssertDepGraphSafe, DepNode, WorkProduct};
+use rustc::dep_graph::{AssertDepGraphSafe, DepNode};
+use rustc::middle::cstore::LinkMeta;
use rustc::hir::map as hir_map;
use rustc::util::common::time;
use session::config::{self, NoDebugInfo};
use common::{type_is_zero_size, val_ty};
use common;
use consts;
-use context::{SharedCrateContext, CrateContextList};
+use context::{self, LocalCrateContext, SharedCrateContext, Stats};
use debuginfo;
use declare;
use machine;
s.bytes().any(|b| b == 0)
}
-fn write_metadata(cx: &SharedCrateContext,
- exported_symbols: &NodeSet) -> Vec<u8> {
+fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>,
+ link_meta: &LinkMeta,
+ exported_symbols: &NodeSet)
+ -> (ContextRef, ModuleRef, EncodedMetadata) {
use flate;
+ let (metadata_llcx, metadata_llmod) = unsafe {
+ context::create_context_and_module(tcx.sess, "metadata")
+ };
+
#[derive(PartialEq, Eq, PartialOrd, Ord)]
enum MetadataKind {
None,
Compressed
}
- let kind = cx.sess().crate_types.borrow().iter().map(|ty| {
+ let kind = tcx.sess.crate_types.borrow().iter().map(|ty| {
match *ty {
config::CrateTypeExecutable |
config::CrateTypeStaticlib |
}).max().unwrap();
if kind == MetadataKind::None {
- return Vec::new();
+ return (metadata_llcx, metadata_llmod, EncodedMetadata {
+ raw_data: vec![],
+ hashes: vec![],
+ });
}
- let cstore = &cx.tcx().sess.cstore;
- let metadata = cstore.encode_metadata(cx.tcx(),
- cx.link_meta(),
+ let cstore = &tcx.sess.cstore;
+ let metadata = cstore.encode_metadata(tcx,
+ &link_meta,
exported_symbols);
if kind == MetadataKind::Uncompressed {
- return metadata;
+ return (metadata_llcx, metadata_llmod, metadata);
}
assert!(kind == MetadataKind::Compressed);
let mut compressed = cstore.metadata_encoding_version().to_vec();
- compressed.extend_from_slice(&flate::deflate_bytes(&metadata));
+ compressed.extend_from_slice(&flate::deflate_bytes(&metadata.raw_data));
- let llmeta = C_bytes_in_context(cx.metadata_llcx(), &compressed);
- let llconst = C_struct_in_context(cx.metadata_llcx(), &[llmeta], false);
- let name = cx.metadata_symbol_name();
+ let llmeta = C_bytes_in_context(metadata_llcx, &compressed);
+ let llconst = C_struct_in_context(metadata_llcx, &[llmeta], false);
+ let name = symbol_export::metadata_symbol_name(tcx);
let buf = CString::new(name).unwrap();
let llglobal = unsafe {
- llvm::LLVMAddGlobal(cx.metadata_llmod(), val_ty(llconst).to_ref(), buf.as_ptr())
+ llvm::LLVMAddGlobal(metadata_llmod, val_ty(llconst).to_ref(), buf.as_ptr())
};
unsafe {
llvm::LLVMSetInitializer(llglobal, llconst);
let section_name =
- cx.tcx().sess.cstore.metadata_section_name(&cx.sess().target.target);
+ tcx.sess.cstore.metadata_section_name(&tcx.sess.target.target);
let name = CString::new(section_name).unwrap();
llvm::LLVMSetSection(llglobal, name.as_ptr());
// metadata doesn't get loaded into memory.
let directive = format!(".section {}", section_name);
let directive = CString::new(directive).unwrap();
- llvm::LLVMSetModuleInlineAsm(cx.metadata_llmod(), directive.as_ptr())
+ llvm::LLVMSetModuleInlineAsm(metadata_llmod, directive.as_ptr())
}
- return metadata;
+ return (metadata_llcx, metadata_llmod, metadata);
}
/// Find any symbols that are defined in one compilation unit, but not declared
/// in any other compilation unit. Give these symbols internal linkage.
fn internalize_symbols<'a, 'tcx>(sess: &Session,
- ccxs: &CrateContextList<'a, 'tcx>,
+ scx: &SharedCrateContext<'a, 'tcx>,
+ llvm_modules: &[ModuleLlvm],
symbol_map: &SymbolMap<'tcx>,
exported_symbols: &ExportedSymbols) {
let export_threshold =
.map(|&(ref name, _)| &name[..])
.collect::<FxHashSet<&str>>();
- let scx = ccxs.shared();
let tcx = scx.tcx();
let incr_comp = sess.opts.debugging_opts.incremental.is_some();
// incremental compilation, we don't need to collect. See below for more
// information.
if !incr_comp {
- for ccx in ccxs.iter_need_trans() {
- for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
+ for ll in llvm_modules {
+ for val in iter_globals(ll.llmod).chain(iter_functions(ll.llmod)) {
let linkage = llvm::LLVMRustGetLinkage(val);
// We only care about external declarations (not definitions)
// and available_externally definitions.
// Examine each external definition. If the definition is not used in
// any other compilation unit, and is not reachable from other crates,
// then give it internal linkage.
- for ccx in ccxs.iter_need_trans() {
- for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
+ for ll in llvm_modules {
+ for val in iter_globals(ll.llmod).chain(iter_functions(ll.llmod)) {
let linkage = llvm::LLVMRustGetLinkage(val);
let is_externally_visible = (linkage == llvm::Linkage::ExternalLinkage) ||
// when using MSVC linker. We do this only for data, as linker can fix up
// code references on its own.
// See #26591, #27438
-fn create_imps(cx: &CrateContextList) {
+fn create_imps(sess: &Session,
+ llvm_modules: &[ModuleLlvm]) {
// The x86 ABI seems to require that leading underscores are added to symbol
// names, so we need an extra underscore on 32-bit. There's also a leading
// '\x01' here which disables LLVM's symbol mangling (e.g. no extra
// underscores added in front).
- let prefix = if cx.shared().sess().target.target.target_pointer_width == "32" {
+ let prefix = if sess.target.target.target_pointer_width == "32" {
"\x01__imp__"
} else {
"\x01__imp_"
};
unsafe {
- for ccx in cx.iter_need_trans() {
- let exported: Vec<_> = iter_globals(ccx.llmod())
+ for ll in llvm_modules {
+ let exported: Vec<_> = iter_globals(ll.llmod)
.filter(|&val| {
llvm::LLVMRustGetLinkage(val) ==
llvm::Linkage::ExternalLinkage &&
})
.collect();
- let i8p_ty = Type::i8p(&ccx);
+ let i8p_ty = Type::i8p_llcx(ll.llcx);
for val in exported {
let name = CStr::from_ptr(llvm::LLVMGetValueName(val));
let mut imp_name = prefix.as_bytes().to_vec();
imp_name.extend(name.to_bytes());
let imp_name = CString::new(imp_name).unwrap();
- let imp = llvm::LLVMAddGlobal(ccx.llmod(),
+ let imp = llvm::LLVMAddGlobal(ll.llmod,
i8p_ty.to_ref(),
imp_name.as_ptr() as *const _);
let init = llvm::LLVMConstBitCast(val, i8p_ty.to_ref());
// particular items that will be processed.
let krate = tcx.hir.krate();
- let ty::CrateAnalysis { reachable, name, .. } = analysis;
+ let ty::CrateAnalysis { reachable, .. } = analysis;
let exported_symbols = find_exported_symbols(tcx, reachable);
let check_overflow = tcx.sess.overflow_checks();
- let link_meta = link::build_link_meta(incremental_hashes_map, &name);
+ let link_meta = link::build_link_meta(incremental_hashes_map);
let shared_ccx = SharedCrateContext::new(tcx,
- link_meta.clone(),
exported_symbols,
check_overflow);
// Translate the metadata.
- let metadata = time(tcx.sess.time_passes(), "write metadata", || {
- write_metadata(&shared_ccx, shared_ccx.exported_symbols())
- });
+ let (metadata_llcx, metadata_llmod, metadata) =
+ time(tcx.sess.time_passes(), "write metadata", || {
+ write_metadata(tcx, &link_meta, shared_ccx.exported_symbols())
+ });
let metadata_module = ModuleTranslation {
name: link::METADATA_MODULE_NAME.to_string(),
symbol_name_hash: 0, // we always rebuild metadata, at least for now
source: ModuleSource::Translated(ModuleLlvm {
- llcx: shared_ccx.metadata_llcx(),
- llmod: shared_ccx.metadata_llmod(),
+ llcx: metadata_llcx,
+ llmod: metadata_llmod,
}),
};
let no_builtins = attr::contains_name(&krate.attrs, "no_builtins");
let empty_exported_symbols = ExportedSymbols::empty();
let linker_info = LinkerInfo::new(&shared_ccx, &empty_exported_symbols);
return CrateTranslation {
+ crate_name: tcx.crate_name(LOCAL_CRATE),
modules: vec![],
metadata_module: metadata_module,
link: link_meta,
let symbol_map = Rc::new(symbol_map);
- let previous_work_products = trans_reuse_previous_work_products(&shared_ccx,
- &codegen_units,
- &symbol_map);
-
- let crate_context_list = CrateContextList::new(&shared_ccx,
- codegen_units,
- previous_work_products,
- symbol_map.clone());
- let modules: Vec<_> = crate_context_list.iter_all()
- .map(|ccx| {
- let source = match ccx.previous_work_product() {
- Some(buf) => ModuleSource::Preexisting(buf.clone()),
- None => ModuleSource::Translated(ModuleLlvm {
- llcx: ccx.llcx(),
- llmod: ccx.llmod(),
- }),
- };
-
- ModuleTranslation {
- name: String::from(ccx.codegen_unit().name()),
- symbol_name_hash: ccx.codegen_unit()
- .compute_symbol_name_hash(&shared_ccx,
- &symbol_map),
- source: source,
- }
+ let mut all_stats = Stats::default();
+ let modules: Vec<ModuleTranslation> = codegen_units
+ .into_iter()
+ .map(|cgu| {
+ let dep_node = cgu.work_product_dep_node();
+ let (stats, module) =
+ tcx.dep_graph.with_task(dep_node,
+ AssertDepGraphSafe(&shared_ccx),
+ AssertDepGraphSafe((cgu, symbol_map.clone())),
+ module_translation);
+ all_stats.extend(stats);
+ module
})
.collect();
- assert_module_sources::assert_module_sources(tcx, &modules);
+ fn module_translation<'a, 'tcx>(
+ scx: AssertDepGraphSafe<&SharedCrateContext<'a, 'tcx>>,
+ args: AssertDepGraphSafe<(CodegenUnit<'tcx>, Rc<SymbolMap<'tcx>>)>)
+ -> (Stats, ModuleTranslation)
+ {
+ // FIXME(#40304): We ought to be using the id as a key and some queries, I think.
+ let AssertDepGraphSafe(scx) = scx;
+ let AssertDepGraphSafe((cgu, symbol_map)) = args;
+
+ let cgu_name = String::from(cgu.name());
+ let cgu_id = cgu.work_product_id();
+ let symbol_name_hash = cgu.compute_symbol_name_hash(scx, &symbol_map);
+
+ // Check whether there is a previous work-product we can
+ // re-use. Not only must the file exist, and the inputs not
+ // be dirty, but the hash of the symbols we will generate must
+ // be the same.
+ let previous_work_product =
+ scx.dep_graph().previous_work_product(&cgu_id).and_then(|work_product| {
+ if work_product.input_hash == symbol_name_hash {
+ debug!("trans_reuse_previous_work_products: reusing {:?}", work_product);
+ Some(work_product)
+ } else {
+ if scx.sess().opts.debugging_opts.incremental_info {
+ println!("incremental: CGU `{}` invalidated because of \
+ changed partitioning hash.",
+ cgu.name());
+ }
+ debug!("trans_reuse_previous_work_products: \
+ not reusing {:?} because hash changed to {:?}",
+ work_product, symbol_name_hash);
+ None
+ }
+ });
- // Instantiate translation items without filling out definitions yet...
- for ccx in crate_context_list.iter_need_trans() {
- let dep_node = ccx.codegen_unit().work_product_dep_node();
- tcx.dep_graph.with_task(dep_node,
- ccx,
- AssertDepGraphSafe(symbol_map.clone()),
- trans_decl_task);
-
- fn trans_decl_task<'a, 'tcx>(ccx: CrateContext<'a, 'tcx>,
- symbol_map: AssertDepGraphSafe<Rc<SymbolMap<'tcx>>>) {
- // FIXME(#40304): Instead of this, the symbol-map should be an
- // on-demand thing that we compute.
- let AssertDepGraphSafe(symbol_map) = symbol_map;
- let cgu = ccx.codegen_unit();
- let trans_items = cgu.items_in_deterministic_order(ccx.tcx(), &symbol_map);
- for (trans_item, linkage) in trans_items {
+ if let Some(buf) = previous_work_product {
+ // Don't need to translate this module.
+ let module = ModuleTranslation {
+ name: cgu_name,
+ symbol_name_hash,
+ source: ModuleSource::Preexisting(buf.clone())
+ };
+ return (Stats::default(), module);
+ }
+
+ // Instantiate translation items without filling out definitions yet...
+ let lcx = LocalCrateContext::new(scx, cgu, symbol_map.clone());
+ let module = {
+ let ccx = CrateContext::new(scx, &lcx);
+ let trans_items = ccx.codegen_unit()
+ .items_in_deterministic_order(ccx.tcx(), &symbol_map);
+ for &(trans_item, linkage) in &trans_items {
trans_item.predefine(&ccx, linkage);
}
- }
- }
- // ... and now that we have everything pre-defined, fill out those definitions.
- for ccx in crate_context_list.iter_need_trans() {
- let dep_node = ccx.codegen_unit().work_product_dep_node();
- tcx.dep_graph.with_task(dep_node,
- ccx,
- AssertDepGraphSafe(symbol_map.clone()),
- trans_def_task);
-
- fn trans_def_task<'a, 'tcx>(ccx: CrateContext<'a, 'tcx>,
- symbol_map: AssertDepGraphSafe<Rc<SymbolMap<'tcx>>>) {
- // FIXME(#40304): Instead of this, the symbol-map should be an
- // on-demand thing that we compute.
- let AssertDepGraphSafe(symbol_map) = symbol_map;
- let cgu = ccx.codegen_unit();
- let trans_items = cgu.items_in_deterministic_order(ccx.tcx(), &symbol_map);
- for (trans_item, _) in trans_items {
+ // ... and now that we have everything pre-defined, fill out those definitions.
+ for &(trans_item, _) in &trans_items {
trans_item.define(&ccx);
}
if ccx.sess().opts.debuginfo != NoDebugInfo {
debuginfo::finalize(&ccx);
}
- }
+
+ ModuleTranslation {
+ name: cgu_name,
+ symbol_name_hash,
+ source: ModuleSource::Translated(ModuleLlvm {
+ llcx: ccx.llcx(),
+ llmod: ccx.llmod(),
+ })
+ }
+ };
+
+ (lcx.into_stats(), module)
}
+ assert_module_sources::assert_module_sources(tcx, &modules);
+
symbol_names_test::report_symbol_names(&shared_ccx);
if shared_ccx.sess().trans_stats() {
- let stats = shared_ccx.stats();
println!("--- trans stats ---");
- println!("n_glues_created: {}", stats.n_glues_created.get());
- println!("n_null_glues: {}", stats.n_null_glues.get());
- println!("n_real_glues: {}", stats.n_real_glues.get());
+ println!("n_glues_created: {}", all_stats.n_glues_created.get());
+ println!("n_null_glues: {}", all_stats.n_null_glues.get());
+ println!("n_real_glues: {}", all_stats.n_real_glues.get());
- println!("n_fns: {}", stats.n_fns.get());
- println!("n_inlines: {}", stats.n_inlines.get());
- println!("n_closures: {}", stats.n_closures.get());
+ println!("n_fns: {}", all_stats.n_fns.get());
+ println!("n_inlines: {}", all_stats.n_inlines.get());
+ println!("n_closures: {}", all_stats.n_closures.get());
println!("fn stats:");
- stats.fn_stats.borrow_mut().sort_by(|&(_, insns_a), &(_, insns_b)| {
+ all_stats.fn_stats.borrow_mut().sort_by(|&(_, insns_a), &(_, insns_b)| {
insns_b.cmp(&insns_a)
});
- for tuple in stats.fn_stats.borrow().iter() {
+ for tuple in all_stats.fn_stats.borrow().iter() {
match *tuple {
(ref name, insns) => {
println!("{} insns, {}", insns, *name);
}
if shared_ccx.sess().count_llvm_insns() {
- for (k, v) in shared_ccx.stats().llvm_insns.borrow().iter() {
+ for (k, v) in all_stats.llvm_insns.borrow().iter() {
println!("{:7} {}", *v, *k);
}
}
let exported_symbols = ExportedSymbols::compute_from(&shared_ccx,
&symbol_map);
+ // Get the list of llvm modules we created. We'll do a few wacky
+ // transforms on them now.
+
+ let llvm_modules: Vec<_> =
+ modules.iter()
+ .filter_map(|module| match module.source {
+ ModuleSource::Translated(llvm) => Some(llvm),
+ _ => None,
+ })
+ .collect();
+
// Now that we have all symbols that are exported from the CGUs of this
// crate, we can run the `internalize_symbols` pass.
time(shared_ccx.sess().time_passes(), "internalize symbols", || {
internalize_symbols(sess,
- &crate_context_list,
+ &shared_ccx,
+ &llvm_modules,
&symbol_map,
&exported_symbols);
});
if sess.target.target.options.is_like_msvc &&
sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateTypeRlib) {
- create_imps(&crate_context_list);
+ create_imps(sess, &llvm_modules);
}
let linker_info = LinkerInfo::new(&shared_ccx, &exported_symbols);
});
CrateTranslation {
+ crate_name: tcx.crate_name(LOCAL_CRATE),
modules: modules,
metadata_module: metadata_module,
link: link_meta,
}
}
-/// For each CGU, identify if we can reuse an existing object file (or
-/// maybe other context).
-fn trans_reuse_previous_work_products(scx: &SharedCrateContext,
- codegen_units: &[CodegenUnit],
- symbol_map: &SymbolMap)
- -> Vec<Option<WorkProduct>> {
- debug!("trans_reuse_previous_work_products()");
- codegen_units
- .iter()
- .map(|cgu| {
- let id = cgu.work_product_id();
-
- let hash = cgu.compute_symbol_name_hash(scx, symbol_map);
-
- debug!("trans_reuse_previous_work_products: id={:?} hash={}", id, hash);
-
- if let Some(work_product) = scx.dep_graph().previous_work_product(&id) {
- if work_product.input_hash == hash {
- debug!("trans_reuse_previous_work_products: reusing {:?}", work_product);
- return Some(work_product);
- } else {
- if scx.sess().opts.debugging_opts.incremental_info {
- println!("incremental: CGU `{}` invalidated because of \
- changed partitioning hash.",
- cgu.name());
- }
- debug!("trans_reuse_previous_work_products: \
- not reusing {:?} because hash changed to {:?}",
- work_product, hash);
- }
- }
-
- None
- })
- .collect()
-}
-
fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>)
-> (Vec<CodegenUnit<'tcx>>, SymbolMap<'tcx>) {
let time_passes = scx.sess().time_passes();
Class::Sse => {
let vec_len = 1 + cls[*i+1..].iter().take_while(|&&c| c == Class::SseUp).count();
*i += vec_len;
- Some(match size {
- 4 => Reg::f32(),
- 8 => Reg::f64(),
- _ => {
- Reg {
- kind: RegKind::Vector,
- size: Size::from_bytes(vec_len as u64 * 8)
- }
+ Some(if vec_len == 1 {
+ match size {
+ 4 => Reg::f32(),
+ _ => Reg::f64()
+ }
+ } else {
+ Reg {
+ kind: RegKind::Vector,
+ size: Size::from_bytes(vec_len as u64 * 8)
}
})
}
collect_neighbours(scx, instance, &mut neighbors);
}
+ TransItem::GlobalAsm(..) => {
+ recursion_depth_reset = None;
+ }
}
record_inlining_canditates(scx.tcx(), starting_point, &neighbors[..], inlining_map);
}
}
}
+ hir::ItemGlobalAsm(..) => {
+ debug!("RootCollector: ItemGlobalAsm({})",
+ def_id_to_string(self.scx.tcx(),
+ self.scx.tcx().hir.local_def_id(item.id)));
+ self.output.push(TransItem::GlobalAsm(item.id));
+ }
hir::ItemStatic(..) => {
debug!("RootCollector: ItemStatic({})",
def_id_to_string(self.scx.tcx(),
use llvm;
use llvm::{ContextRef, ModuleRef, ValueRef};
-use rustc::dep_graph::{DepGraph, DepGraphSafe, DepNode, DepTrackingMap,
- DepTrackingMapConfig, WorkProduct};
-use middle::cstore::LinkMeta;
+use rustc::dep_graph::{DepGraph, DepGraphSafe, DepNode, DepTrackingMap, DepTrackingMapConfig};
use rustc::hir;
use rustc::hir::def_id::DefId;
use rustc::traits;
use syntax_pos::DUMMY_SP;
use abi::Abi;
+#[derive(Clone, Default)]
pub struct Stats {
pub n_glues_created: Cell<usize>,
pub n_null_glues: Cell<usize>,
pub fn_stats: RefCell<Vec<(String, usize)> >,
}
+impl Stats {
+ pub fn extend(&mut self, stats: Stats) {
+ self.n_glues_created.set(self.n_glues_created.get() + stats.n_glues_created.get());
+ self.n_null_glues.set(self.n_null_glues.get() + stats.n_null_glues.get());
+ self.n_real_glues.set(self.n_real_glues.get() + stats.n_real_glues.get());
+ self.n_fns.set(self.n_fns.get() + stats.n_fns.get());
+ self.n_inlines.set(self.n_inlines.get() + stats.n_inlines.get());
+ self.n_closures.set(self.n_closures.get() + stats.n_closures.get());
+ self.n_llvm_insns.set(self.n_llvm_insns.get() + stats.n_llvm_insns.get());
+ self.llvm_insns.borrow_mut().extend(
+ stats.llvm_insns.borrow().iter()
+ .map(|(key, value)| (key.clone(), value.clone())));
+ self.fn_stats.borrow_mut().append(&mut *stats.fn_stats.borrow_mut());
+ }
+}
+
/// The shared portion of a `CrateContext`. There is one `SharedCrateContext`
/// per crate. The data here is shared between all compilation units of the
/// crate, so it must not contain references to any LLVM data structures
/// (aside from metadata-related ones).
pub struct SharedCrateContext<'a, 'tcx: 'a> {
- metadata_llmod: ModuleRef,
- metadata_llcx: ContextRef,
-
exported_symbols: NodeSet,
- link_meta: LinkMeta,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
empty_param_env: ty::ParameterEnvironment<'tcx>,
- stats: Stats,
check_overflow: bool,
use_dll_storage_attrs: bool,
pub struct LocalCrateContext<'tcx> {
llmod: ModuleRef,
llcx: ContextRef,
- previous_work_product: Option<WorkProduct>,
+ stats: Stats,
codegen_unit: CodegenUnit<'tcx>,
needs_unwind_cleanup_cache: RefCell<FxHashMap<Ty<'tcx>, bool>>,
/// Cache instances of monomorphic and polymorphic items
}
}
-/// This list owns a number of LocalCrateContexts and binds them to their common
-/// SharedCrateContext. This type just exists as a convenience, something to
-/// pass around all LocalCrateContexts with and get an iterator over them.
-pub struct CrateContextList<'a, 'tcx: 'a> {
- shared: &'a SharedCrateContext<'a, 'tcx>,
- local_ccxs: Vec<LocalCrateContext<'tcx>>,
-}
-
-impl<'a, 'tcx: 'a> CrateContextList<'a, 'tcx> {
- pub fn new(shared_ccx: &'a SharedCrateContext<'a, 'tcx>,
- codegen_units: Vec<CodegenUnit<'tcx>>,
- previous_work_products: Vec<Option<WorkProduct>>,
- symbol_map: Rc<SymbolMap<'tcx>>)
- -> CrateContextList<'a, 'tcx> {
- CrateContextList {
- shared: shared_ccx,
- local_ccxs: codegen_units.into_iter().zip(previous_work_products).map(|(cgu, wp)| {
- LocalCrateContext::new(shared_ccx, cgu, wp, symbol_map.clone())
- }).collect()
- }
- }
-
- /// Iterate over all crate contexts, whether or not they need
- /// translation. That is, whether or not a `.o` file is available
- /// for re-use from a previous incr. comp.).
- pub fn iter_all<'b>(&'b self) -> CrateContextIterator<'b, 'tcx> {
- CrateContextIterator {
- shared: self.shared,
- index: 0,
- local_ccxs: &self.local_ccxs[..],
- filter_to_previous_work_product_unavail: false,
- }
- }
-
- /// Iterator over all CCX that need translation (cannot reuse results from
- /// previous incr. comp.).
- pub fn iter_need_trans<'b>(&'b self) -> CrateContextIterator<'b, 'tcx> {
- CrateContextIterator {
- shared: self.shared,
- index: 0,
- local_ccxs: &self.local_ccxs[..],
- filter_to_previous_work_product_unavail: true,
- }
- }
-
- pub fn shared(&self) -> &'a SharedCrateContext<'a, 'tcx> {
- self.shared
- }
-}
-
/// A CrateContext value binds together one LocalCrateContext with the
/// SharedCrateContext. It exists as a convenience wrapper, so we don't have to
/// pass around (SharedCrateContext, LocalCrateContext) tuples all over trans.
pub struct CrateContext<'a, 'tcx: 'a> {
shared: &'a SharedCrateContext<'a, 'tcx>,
- local_ccxs: &'a [LocalCrateContext<'tcx>],
- /// The index of `local` in `local_ccxs`. This is used in
- /// `maybe_iter(true)` to identify the original `LocalCrateContext`.
- index: usize,
+ local_ccx: &'a LocalCrateContext<'tcx>,
}
-impl<'a, 'tcx> DepGraphSafe for CrateContext<'a, 'tcx> {
-}
-
-pub struct CrateContextIterator<'a, 'tcx: 'a> {
- shared: &'a SharedCrateContext<'a, 'tcx>,
- local_ccxs: &'a [LocalCrateContext<'tcx>],
- index: usize,
-
- /// if true, only return results where `previous_work_product` is none
- filter_to_previous_work_product_unavail: bool,
+impl<'a, 'tcx> CrateContext<'a, 'tcx> {
+ pub fn new(shared: &'a SharedCrateContext<'a, 'tcx>,
+ local_ccx: &'a LocalCrateContext<'tcx>)
+ -> Self {
+ CrateContext { shared, local_ccx }
+ }
}
-impl<'a, 'tcx> Iterator for CrateContextIterator<'a,'tcx> {
- type Item = CrateContext<'a, 'tcx>;
-
- fn next(&mut self) -> Option<CrateContext<'a, 'tcx>> {
- loop {
- if self.index >= self.local_ccxs.len() {
- return None;
- }
-
- let index = self.index;
- self.index += 1;
-
- let ccx = CrateContext {
- shared: self.shared,
- index: index,
- local_ccxs: self.local_ccxs,
- };
-
- if
- self.filter_to_previous_work_product_unavail &&
- ccx.previous_work_product().is_some()
- {
- continue;
- }
-
- return Some(ccx);
- }
- }
+impl<'a, 'tcx> DepGraphSafe for CrateContext<'a, 'tcx> {
}
pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode {
!is_any_library(sess) && get_reloc_model(sess) == llvm::RelocMode::PIC
}
-unsafe fn create_context_and_module(sess: &Session, mod_name: &str) -> (ContextRef, ModuleRef) {
+pub unsafe fn create_context_and_module(sess: &Session, mod_name: &str) -> (ContextRef, ModuleRef) {
let llcx = llvm::LLVMContextCreate();
let mod_name = CString::new(mod_name).unwrap();
let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);
impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> {
pub fn new(tcx: TyCtxt<'b, 'tcx, 'tcx>,
- link_meta: LinkMeta,
exported_symbols: NodeSet,
check_overflow: bool)
-> SharedCrateContext<'b, 'tcx> {
- let (metadata_llcx, metadata_llmod) = unsafe {
- create_context_and_module(&tcx.sess, "metadata")
- };
-
// An interesting part of Windows which MSVC forces our hand on (and
// apparently MinGW didn't) is the usage of `dllimport` and `dllexport`
// attributes in LLVM IR as well as native dependencies (in C these
let use_dll_storage_attrs = tcx.sess.target.target.options.is_like_msvc;
SharedCrateContext {
- metadata_llmod: metadata_llmod,
- metadata_llcx: metadata_llcx,
exported_symbols: exported_symbols,
- link_meta: link_meta,
empty_param_env: tcx.empty_parameter_environment(),
tcx: tcx,
- stats: Stats {
- n_glues_created: Cell::new(0),
- n_null_glues: Cell::new(0),
- n_real_glues: Cell::new(0),
- n_fns: Cell::new(0),
- n_inlines: Cell::new(0),
- n_closures: Cell::new(0),
- n_llvm_insns: Cell::new(0),
- llvm_insns: RefCell::new(FxHashMap()),
- fn_stats: RefCell::new(Vec::new()),
- },
check_overflow: check_overflow,
use_dll_storage_attrs: use_dll_storage_attrs,
translation_items: RefCell::new(FxHashSet()),
ty.is_sized(self.tcx, &self.empty_param_env, DUMMY_SP)
}
- pub fn metadata_llmod(&self) -> ModuleRef {
- self.metadata_llmod
- }
-
- pub fn metadata_llcx(&self) -> ContextRef {
- self.metadata_llcx
- }
-
pub fn exported_symbols<'a>(&'a self) -> &'a NodeSet {
&self.exported_symbols
}
&self.project_cache
}
- pub fn link_meta<'a>(&'a self) -> &'a LinkMeta {
- &self.link_meta
- }
-
pub fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> {
self.tcx
}
&self.tcx.dep_graph
}
- pub fn stats<'a>(&'a self) -> &'a Stats {
- &self.stats
- }
-
pub fn use_dll_storage_attrs(&self) -> bool {
self.use_dll_storage_attrs
}
pub fn translation_items(&self) -> &RefCell<FxHashSet<TransItem<'tcx>>> {
&self.translation_items
}
-
- pub fn metadata_symbol_name(&self) -> String {
- format!("rust_metadata_{}_{}",
- self.link_meta().crate_name,
- self.link_meta().crate_hash)
- }
}
impl<'tcx> LocalCrateContext<'tcx> {
- fn new<'a>(shared: &SharedCrateContext<'a, 'tcx>,
- codegen_unit: CodegenUnit<'tcx>,
- previous_work_product: Option<WorkProduct>,
- symbol_map: Rc<SymbolMap<'tcx>>)
- -> LocalCrateContext<'tcx> {
+ pub fn new<'a>(shared: &SharedCrateContext<'a, 'tcx>,
+ codegen_unit: CodegenUnit<'tcx>,
+ symbol_map: Rc<SymbolMap<'tcx>>)
+ -> LocalCrateContext<'tcx> {
unsafe {
// Append ".rs" to LLVM module identifier.
//
let local_ccx = LocalCrateContext {
llmod: llmod,
llcx: llcx,
- previous_work_product: previous_work_product,
+ stats: Stats::default(),
codegen_unit: codegen_unit,
needs_unwind_cleanup_cache: RefCell::new(FxHashMap()),
instances: RefCell::new(FxHashMap()),
assert!(local_ccxs.len() == 1);
CrateContext {
shared: shared,
- index: 0,
- local_ccxs: local_ccxs
+ local_ccx: &local_ccxs[0]
}
}
+
+ pub fn into_stats(self) -> Stats {
+ self.stats
+ }
}
impl<'b, 'tcx> CrateContext<'b, 'tcx> {
}
fn local(&self) -> &'b LocalCrateContext<'tcx> {
- &self.local_ccxs[self.index]
+ self.local_ccx
}
pub fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> {
self.local().llcx
}
- pub fn previous_work_product(&self) -> Option<&WorkProduct> {
- self.local().previous_work_product.as_ref()
- }
-
pub fn codegen_unit(&self) -> &CodegenUnit<'tcx> {
&self.local().codegen_unit
}
unsafe { llvm::LLVMRustGetModuleDataLayout(self.llmod()) }
}
- pub fn exported_symbols<'a>(&'a self) -> &'a NodeSet {
- &self.shared.exported_symbols
- }
-
- pub fn link_meta<'a>(&'a self) -> &'a LinkMeta {
- &self.shared.link_meta
- }
-
pub fn needs_unwind_cleanup_cache(&self) -> &RefCell<FxHashMap<Ty<'tcx>, bool>> {
&self.local().needs_unwind_cleanup_cache
}
}
pub fn stats<'a>(&'a self) -> &'a Stats {
- &self.shared.stats
+ &self.local().stats
}
pub fn int_type(&self) -> Type {
let mut has_variables = BitVector::new(mir.visibility_scopes.len());
for var in mir.vars_iter() {
let decl = &mir.local_decls[var];
- has_variables.insert(decl.source_info.unwrap().scope.index());
+ has_variables.insert(decl.source_info.scope.index());
}
// Instantiate all scopes.
DICompositeType, DILexicalBlock, DIFlags};
use rustc::hir::def::CtorKind;
-use rustc::hir::def_id::DefId;
+use rustc::hir::def_id::{DefId, LOCAL_CRATE};
use rustc::ty::fold::TypeVisitor;
use rustc::ty::subst::Substs;
use rustc::ty::util::TypeIdHasher;
};
debug!("compile_unit_metadata: {:?}", compile_unit_name);
- let producer = format!("rustc version {}",
+ // FIXME(#41252) Remove "clang LLVM" if we can get GDB and LLVM to play nice.
+ let producer = format!("clang LLVM (rustc version {})",
(option_env!("CFG_VERSION")).expect("CFG_VERSION"));
let compile_unit_name = compile_unit_name.as_ptr();
};
fn fallback_path(scc: &SharedCrateContext) -> CString {
- CString::new(scc.link_meta().crate_name.to_string()).unwrap()
+ CString::new(scc.tcx().crate_name(LOCAL_CRATE).to_string()).unwrap()
}
}
// visible). It might better to use the `exported_items` set from
// `driver::CrateAnalysis` in the future, but (atm) this set is not
// available in the translation pass.
- !cx.exported_symbols().contains(&node_id)
+ !cx.shared().exported_symbols().contains(&node_id)
}
#[allow(non_snake_case)]
use llvm::{ValueRef};
use abi::{Abi, FnType};
use adt;
-use mir::lvalue::LvalueRef;
+use mir::lvalue::{LvalueRef, Alignment};
use base::*;
use common::*;
use declare;
use std::cmp::Ordering;
use std::iter;
-use mir::lvalue::Alignment;
-
fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
let llvm_name = match name {
"sqrtf32" => "llvm.sqrt.f32",
C_nil(ccx)
}
// Effectively no-ops
- "uninit" | "forget" => {
+ "uninit" => {
C_nil(ccx)
}
"needs_drop" => {
for i in 0..elems.len() {
let val = bcx.extract_value(val, i);
- bcx.store(val, bcx.struct_gep(llresult, i), None);
+ let lval = LvalueRef::new_sized_ty(llresult, ret_ty,
+ Alignment::AbiAligned);
+ let (dest, align) = lval.trans_field_ptr(bcx, i);
+ bcx.store(val, dest, align.to_align());
}
C_nil(ccx)
}
#![feature(conservative_impl_trait)]
use rustc::dep_graph::WorkProduct;
+use syntax_pos::symbol::Symbol;
extern crate flate;
extern crate libc;
unsafe impl Sync for ModuleTranslation { }
pub struct CrateTranslation {
+ pub crate_name: Symbol,
pub modules: Vec<ModuleTranslation>,
pub metadata_module: ModuleTranslation,
pub link: middle::cstore::LinkMeta,
- pub metadata: Vec<u8>,
+ pub metadata: middle::cstore::EncodedMetadata,
pub exported_symbols: back::symbol_export::ExportedSymbols,
pub no_builtins: bool,
pub windows_subsystem: Option<String>,
use rustc::mir::traversal;
use common;
use super::MirContext;
-use super::rvalue;
pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
let mir = mircx.mir;
if let mir::Lvalue::Local(index) = *lvalue {
self.mark_assigned(index);
- if !rvalue::rvalue_creates_operand(rvalue) {
+ if !self.cx.rvalue_creates_operand(rvalue) {
self.mark_as_lvalue(index);
}
} else {
use libc::c_uint;
use llvm::{self, ValueRef, BasicBlockRef};
use llvm::debuginfo::DIScope;
-use rustc::ty;
+use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, LayoutTyper};
use rustc::mir::{self, Mir};
use rustc::mir::tcx::LvalueTy;
use rustc::ty::subst::Substs;
use rustc::infer::TransNormalize;
-use rustc::ty::TypeFoldable;
use session::config::FullDebugInfo;
use base;
use builder::Builder;
-use common::{self, CrateContext, C_null, Funclet};
+use common::{self, CrateContext, Funclet};
use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
use monomorphize::{self, Instance};
use abi::FnType;
impl<'tcx> LocalRef<'tcx> {
fn new_operand<'a>(ccx: &CrateContext<'a, 'tcx>,
- ty: ty::Ty<'tcx>) -> LocalRef<'tcx> {
+ ty: Ty<'tcx>) -> LocalRef<'tcx> {
if common::type_is_zero_size(ccx, ty) {
// Zero-size temporaries aren't always initialized, which
// doesn't matter because they don't contain data, but
// we need something in the operand.
- let llty = type_of::type_of(ccx, ty);
- let val = if common::type_is_imm_pair(ccx, ty) {
- let fields = llty.field_types();
- OperandValue::Pair(C_null(fields[0]), C_null(fields[1]))
- } else {
- OperandValue::Immediate(C_null(llty))
- };
- let op = OperandRef {
- val: val,
- ty: ty
- };
- LocalRef::Operand(Some(op))
+ LocalRef::Operand(Some(OperandRef::new_zst(ccx, ty)))
} else {
LocalRef::Operand(None)
}
debug!("fn_ty: {:?}", fn_ty);
let debug_context =
debuginfo::create_function_debug_context(ccx, instance, sig, llfn, mir);
- let bcx = Builder::new_block(ccx, llfn, "entry-block");
+ let bcx = Builder::new_block(ccx, llfn, "start");
let cleanup_kinds = analyze::cleanup_kinds(&mir);
- // Allocate a `Block` for every basic block
+ // Allocate a `Block` for every basic block, except
+ // the start block, if nothing loops back to it.
+ let reentrant_start_block = !mir.predecessors_for(mir::START_BLOCK).is_empty();
let block_bcxs: IndexVec<mir::BasicBlock, BasicBlockRef> =
mir.basic_blocks().indices().map(|bb| {
- if bb == mir::START_BLOCK {
- bcx.build_sibling_block("start").llbb()
+ if bb == mir::START_BLOCK && !reentrant_start_block {
+ bcx.llbb()
} else {
bcx.build_sibling_block(&format!("{:?}", bb)).llbb()
}
if let Some(name) = decl.name {
// User variable
- let source_info = decl.source_info.unwrap();
- let debug_scope = mircx.scopes[source_info.scope];
+ let debug_scope = mircx.scopes[decl.source_info.scope];
let dbg = debug_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo;
if !lvalue_locals.contains(local.index()) && !dbg {
assert!(!ty.has_erasable_regions());
let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str());
if dbg {
- let (scope, span) = mircx.debug_loc(source_info);
+ let (scope, span) = mircx.debug_loc(decl.source_info);
declare_local(&bcx, &mircx.debug_context, name, ty, scope,
VariableAccess::DirectVariable { alloca: lvalue.llval },
VariableKind::LocalVariable, span);
.collect()
};
- // Branch to the START block
- let start_bcx = mircx.blocks[mir::START_BLOCK];
- bcx.br(start_bcx);
+ // Branch to the START block, if it's not the entry block.
+ if reentrant_start_block {
+ bcx.br(mircx.blocks[mir::START_BLOCK]);
+ }
// Up until here, IR instructions for this function have explicitly not been annotated with
// source code location, so we don't step into call setup code. From here on, source location
let lvalue = LvalueRef::alloca(bcx, arg_ty, &format!("arg{}", arg_index));
for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
- let dst = bcx.struct_gep(lvalue.llval, i);
+ let (dst, _) = lvalue.trans_field_ptr(bcx, i);
let arg = &mircx.fn_ty.args[idx];
idx += 1;
if common::type_is_fat_ptr(bcx.ccx, tupled_arg_ty) {
use rustc_data_structures::indexed_vec::Idx;
use base;
-use common;
+use common::{self, CrateContext, C_null};
use builder::Builder;
use value::Value;
use type_of;
}
impl<'a, 'tcx> OperandRef<'tcx> {
+ pub fn new_zst(ccx: &CrateContext<'a, 'tcx>,
+ ty: Ty<'tcx>) -> OperandRef<'tcx> {
+ assert!(common::type_is_zero_size(ccx, ty));
+ let llty = type_of::type_of(ccx, ty);
+ let val = if common::type_is_imm_pair(ccx, ty) {
+ let fields = llty.field_types();
+ OperandValue::Pair(C_null(fields[0]), C_null(fields[1]))
+ } else {
+ OperandValue::Immediate(C_null(llty))
+ };
+ OperandRef {
+ val: val,
+ ty: ty
+ }
+ }
+
/// Asserts that this operand refers to a scalar and returns
/// a reference to its value.
pub fn immediate(self) -> ValueRef {
}
_ => {
- assert!(rvalue_creates_operand(rvalue));
+ assert!(self.rvalue_creates_operand(rvalue));
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp);
bcx
rvalue: &mir::Rvalue<'tcx>)
-> (Builder<'a, 'tcx>, OperandRef<'tcx>)
{
- assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);
+ assert!(self.rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);
match *rvalue {
mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
}
mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) => {
- bug!("cannot generate operand from rvalue {:?}", rvalue);
-
+ // According to `rvalue_creates_operand`, only ZST
+ // aggregate rvalues are allowed to be operands.
+ let ty = rvalue.ty(self.mir, self.ccx.tcx());
+ (bcx, OperandRef::new_zst(self.ccx, self.monomorphize(&ty)))
}
}
}
OperandValue::Pair(val, of)
}
-}
-pub fn rvalue_creates_operand(rvalue: &mir::Rvalue) -> bool {
- match *rvalue {
- mir::Rvalue::Ref(..) |
- mir::Rvalue::Len(..) |
- mir::Rvalue::Cast(..) | // (*)
- mir::Rvalue::BinaryOp(..) |
- mir::Rvalue::CheckedBinaryOp(..) |
- mir::Rvalue::UnaryOp(..) |
- mir::Rvalue::Discriminant(..) |
- mir::Rvalue::Box(..) |
- mir::Rvalue::Use(..) => // (*)
- true,
- mir::Rvalue::Repeat(..) |
- mir::Rvalue::Aggregate(..) =>
- false,
- }
+ pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
+ match *rvalue {
+ mir::Rvalue::Ref(..) |
+ mir::Rvalue::Len(..) |
+ mir::Rvalue::Cast(..) | // (*)
+ mir::Rvalue::BinaryOp(..) |
+ mir::Rvalue::CheckedBinaryOp(..) |
+ mir::Rvalue::UnaryOp(..) |
+ mir::Rvalue::Discriminant(..) |
+ mir::Rvalue::Box(..) |
+ mir::Rvalue::Use(..) => // (*)
+ true,
+ mir::Rvalue::Repeat(..) |
+ mir::Rvalue::Aggregate(..) => {
+ let ty = rvalue.ty(self.mir, self.ccx.tcx());
+ let ty = self.monomorphize(&ty);
+ common::type_is_zero_size(self.ccx, ty)
+ }
+ }
- // (*) this is only true if the type is suitable
+ // (*) this is only true if the type is suitable
+ }
}
#[derive(Copy, Clone)]
symbol_name.len().hash(&mut state);
symbol_name.hash(&mut state);
let exported = match item {
- TransItem::Fn(ref instance) => {
- let node_id =
- scx.tcx().hir.as_local_node_id(instance.def_id());
+ TransItem::Fn(ref instance) => {
+ let node_id =
+ scx.tcx().hir.as_local_node_id(instance.def_id());
node_id.map(|node_id| exported_symbols.contains(&node_id))
- .unwrap_or(false)
- }
- TransItem::Static(node_id) => {
+ .unwrap_or(false)
+ }
+ TransItem::Static(node_id) => {
exported_symbols.contains(&node_id)
- }
+ }
+ TransItem::GlobalAsm(..) => true,
};
exported.hash(&mut state);
}
TransItem::Fn(instance) => {
tcx.hir.as_local_node_id(instance.def_id())
}
- TransItem::Static(node_id) => Some(node_id),
+ TransItem::Static(node_id) | TransItem::GlobalAsm(node_id) => {
+ Some(node_id)
+ }
}
}
}
None => {
match trans_item {
TransItem::Fn(..) |
- TransItem::Static(..) => llvm::ExternalLinkage,
+ TransItem::Static(..) |
+ TransItem::GlobalAsm(..) => llvm::ExternalLinkage,
}
}
};
Some(def_id)
}
- TransItem::Static(node_id) => Some(tcx.hir.local_def_id(node_id)),
+ TransItem::Static(node_id) |
+ TransItem::GlobalAsm(node_id) => Some(tcx.hir.local_def_id(node_id)),
}
}
TransItem::Fn(Instance { def, .. }) => {
tcx.hir.as_local_node_id(def.def_id())
}
- TransItem::Static(node_id) => Some(node_id),
+ TransItem::Static(node_id) |
+ TransItem::GlobalAsm(node_id) => {
+ Some(node_id)
+ }
}.map(|node_id| {
tcx.hir.span(node_id)
})
//! item-path. This is used for unit testing the code that generates
//! paths etc in all kinds of annoying scenarios.
+use asm;
use attributes;
use base;
use consts;
#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)]
pub enum TransItem<'tcx> {
Fn(Instance<'tcx>),
- Static(NodeId)
+ Static(NodeId),
+ GlobalAsm(NodeId),
}
/// Describes how a translation item will be instantiated in object files.
span_bug!(item.span, "Mismatch between hir::Item type and TransItem type")
}
}
+ TransItem::GlobalAsm(node_id) => {
+ let item = ccx.tcx().hir.expect_item(node_id);
+ if let hir::ItemGlobalAsm(ref ga) = item.node {
+ asm::trans_global_asm(ccx, ga);
+ } else {
+ span_bug!(item.span, "Mismatch between hir::Item type and TransItem type")
+ }
+ }
TransItem::Fn(instance) => {
let _task = ccx.tcx().dep_graph.in_task(
DepNode::TransCrateItem(instance.def_id())); // (*)
TransItem::Fn(instance) => {
TransItem::predefine_fn(ccx, instance, linkage, &symbol_name);
}
+ TransItem::GlobalAsm(..) => {}
}
debug!("END PREDEFINING '{} ({})' in cgu {}",
let def_id = scx.tcx().hir.local_def_id(node_id);
symbol_names::symbol_name(Instance::mono(scx.tcx(), def_id), scx)
}
+ TransItem::GlobalAsm(node_id) => {
+ let def_id = scx.tcx().hir.local_def_id(node_id);
+ format!("global_asm_{:?}", def_id)
+ }
}
}
}
}
TransItem::Static(..) => InstantiationMode::GloballyShared,
+ TransItem::GlobalAsm(..) => InstantiationMode::GloballyShared,
}
}
TransItem::Fn(ref instance) => {
instance.substs.types().next().is_some()
}
- TransItem::Static(..) => false,
+ TransItem::Static(..) |
+ TransItem::GlobalAsm(..) => false,
}
}
let def_id = match *self {
TransItem::Fn(ref instance) => instance.def_id(),
TransItem::Static(node_id) => tcx.hir.local_def_id(node_id),
+ TransItem::GlobalAsm(..) => return None,
};
let attributes = tcx.get_attrs(def_id);
let instance = Instance::new(def_id, tcx.intern_substs(&[]));
to_string_internal(tcx, "static ", instance)
},
+ TransItem::GlobalAsm(..) => {
+ "global_asm".to_string()
+ }
};
fn to_string_internal<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
TransItem::Static(id) => {
format!("Static({:?})", id)
}
+ TransItem::GlobalAsm(id) => {
+ format!("GlobalAsm({:?})", id)
+ }
}
}
}
#![allow(non_upper_case_globals)]
use llvm;
-use llvm::{TypeRef, Bool, False, True, TypeKind};
+use llvm::{ContextRef, TypeRef, Bool, False, True, TypeKind};
use llvm::{Float, Double, X86_FP80, PPC_FP128, FP128};
use context::CrateContext;
ty!(llvm::LLVMInt8TypeInContext(ccx.llcx()))
}
+ pub fn i8_llcx(llcx: ContextRef) -> Type {
+ ty!(llvm::LLVMInt8TypeInContext(llcx))
+ }
+
pub fn i16(ccx: &CrateContext) -> Type {
ty!(llvm::LLVMInt16TypeInContext(ccx.llcx()))
}
Type::i8(ccx).ptr_to()
}
+ pub fn i8p_llcx(llcx: ContextRef) -> Type {
+ Type::i8_llcx(llcx).ptr_to()
+ }
+
pub fn int(ccx: &CrateContext) -> Type {
match &ccx.tcx().sess.target.target.target_pointer_width[..] {
"16" => Type::i16(ccx),
// If the callee is a bare function or a closure, then we're all set.
match self.structurally_resolved_type(callee_expr.span, adjusted_ty).sty {
ty::TyFnDef(..) | ty::TyFnPtr(_) => {
- self.write_autoderef_adjustment(callee_expr.id, autoderefs, adjusted_ty);
+ self.apply_autoderef_adjustment(callee_expr.id, autoderefs, adjusted_ty);
return Some(CallStep::Builtin);
}
ty::Predicate::Projection(ref data) => Some(data.to_poly_trait_ref()),
ty::Predicate::Trait(ref data) => Some(data.to_poly_trait_ref()),
ty::Predicate::Equate(..) => None,
+ ty::Predicate::Subtype(..) => None,
ty::Predicate::RegionOutlives(..) => None,
ty::Predicate::TypeOutlives(..) => None,
ty::Predicate::WellFormed(..) => None,
// Consider coercing the subtype to a DST
let unsize = self.coerce_unsized(a, b);
if unsize.is_ok() {
+ debug!("coerce: unsize successful");
return unsize;
}
+ debug!("coerce: unsize failed");
// Examine the supertype and consider auto-borrowing.
//
self.commit_if_ok(|_| {
let ok = coerce.coerce(&[expr], source, target)?;
let adjustment = self.register_infer_ok_obligations(ok);
- if !adjustment.is_identity() {
- debug!("Success, coerced with {:?}", adjustment);
- if self.tables.borrow().adjustments.get(&expr.id).is_some() {
- bug!("expr already has an adjustment on it!");
- }
- self.write_adjustment(expr.id, adjustment);
- }
+ self.apply_adjustment(expr.id, adjustment);
// We should now have added sufficient adjustments etc to
// ensure that the type of expression, post-adjustment, is
{
let prev_ty = self.resolve_type_vars_with_obligations(prev_ty);
let new_ty = self.resolve_type_vars_with_obligations(new_ty);
- debug!("coercion::try_find_lub({:?}, {:?})", prev_ty, new_ty);
+ debug!("coercion::try_find_coercion_lub({:?}, {:?})", prev_ty, new_ty);
// Special-ish case: we can coerce any type `T` into the `!`
// type, but only if the source expression diverges.
// Reify both sides and return the reified fn pointer type.
let fn_ptr = self.tcx.mk_fn_ptr(fty);
for expr in exprs.iter().map(|e| e.as_coercion_site()).chain(Some(new)) {
- // No adjustments can produce a fn item, so this should never trip.
- assert!(!self.tables.borrow().adjustments.contains_key(&expr.id));
- self.write_adjustment(expr.id, Adjustment {
+ // The only adjustment that can produce an fn item is
+ // `NeverToAny`, so this should always be valid.
+ self.apply_adjustment(expr.id, Adjustment {
kind: Adjust::ReifyFnPointer,
target: fn_ptr
});
match result {
Ok(ok) => {
let adjustment = self.register_infer_ok_obligations(ok);
- if !adjustment.is_identity() {
- self.write_adjustment(new.id, adjustment);
- }
+ self.apply_adjustment(new.id, adjustment);
return Ok(adjustment.target);
}
Err(e) => first_error = Some(e),
}) => {
match self.node_ty(expr.id).sty {
ty::TyRef(_, mt_orig) => {
- // Reborrow that we can safely ignore.
+ // Reborrow that we can safely ignore, because
+ // the next adjustment can only be a DerefRef
+ // which will be merged into it.
mutbl_adj == mt_orig.mutbl
}
_ => false,
}
Ok(ok) => {
let adjustment = self.register_infer_ok_obligations(ok);
- if !adjustment.is_identity() {
- let mut tables = self.tables.borrow_mut();
- for expr in exprs {
- let expr = expr.as_coercion_site();
- if let Some(&mut Adjustment {
- kind: Adjust::NeverToAny,
- ref mut target
- }) = tables.adjustments.get_mut(&expr.id) {
- *target = adjustment.target;
- continue;
- }
- tables.adjustments.insert(expr.id, adjustment);
- }
+ for expr in exprs {
+ let expr = expr.as_coercion_site();
+ self.apply_adjustment(expr.id, adjustment);
}
Ok(adjustment.target)
}
"rustc_peek" => (1, vec![param(0)], param(0)),
"init" => (1, Vec::new(), param(0)),
"uninit" => (1, Vec::new(), param(0)),
- "forget" => (1, vec![ param(0) ], tcx.mk_nil()),
"transmute" => (2, vec![ param(0) ], param(1)),
"move_val_init" => {
(1,
let target = target.adjust_for_autoref(self.tcx, autoref);
// Write out the final adjustment.
- self.write_adjustment(self.self_expr.id, Adjustment {
+ self.apply_adjustment(self.self_expr.id, Adjustment {
kind: Adjust::DerefRef {
autoderefs: pick.autoderefs,
autoref: autoref,
for (i, &expr) in exprs.iter().rev().enumerate() {
debug!("convert_lvalue_derefs_to_mutable: i={} expr={:?}", i, expr);
- // Count autoderefs.
+ // Count autoderefs. We don't need to fix up the autoref - the parent
+ // expression will fix them up for us.
let adjustment = self.tables.borrow().adjustments.get(&expr.id).cloned();
match adjustment {
Some(Adjustment { kind: Adjust::DerefRef { autoderefs, .. }, .. }) => {
// expects. This is annoying and horrible. We
// ought to recode this routine so it doesn't
// (ab)use the normal type checking paths.
- let adj = self.tables.borrow().adjustments.get(&base_expr.id).cloned();
+ let adj = self.tables.borrow_mut().adjustments.remove(&base_expr.id);
let (autoderefs, unsize, adjusted_base_ty) = match adj {
Some(Adjustment {
kind: Adjust::DerefRef { autoderefs, autoref, unsize },
// a preference for mut
let method_call = ty::MethodCall::expr(expr.id);
if self.tables.borrow().method_map.contains_key(&method_call) {
+ self.tables.borrow_mut().adjustments.remove(&base_expr.id);
let method = self.try_overloaded_deref(expr.span,
Some(&base_expr),
self.node_ty(base_expr.id),
}
};
- self.write_adjustment(self_expr.id, Adjustment {
+ self.apply_adjustment(self_expr.id, Adjustment {
kind: Adjust::DerefRef {
autoderefs: autoderefs,
autoref: autoref,
}
}
ty::Predicate::Equate(..) |
+ ty::Predicate::Subtype(..) |
ty::Predicate::Projection(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::WellFormed(..) |
self.probe(|_| {
// First check that the self type can be related.
- match self.sub_types(false,
- &ObligationCause::dummy(),
- self_ty,
- probe.xform_self_ty) {
- Ok(InferOk { obligations, value: () }) => {
- // FIXME(#32730) propagate obligations
- assert!(obligations.is_empty())
- }
+ let sub_obligations = match self.sub_types(false,
+ &ObligationCause::dummy(),
+ self_ty,
+ probe.xform_self_ty) {
+ Ok(InferOk { obligations, value: () }) => obligations,
Err(_) => {
debug!("--> cannot relate self-types");
return false;
}
- }
+ };
// If so, impls may carry other conditions (e.g., where
// clauses) that must be considered. Make sure that those
// Evaluate those obligations to see if they might possibly hold.
let mut all_true = true;
for o in obligations.iter()
+ .chain(sub_obligations.iter())
.chain(norm_obligations.iter())
.chain(ref_obligations.iter()) {
if !selcx.evaluate_obligation(o) {
use hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
use rustc_back::slice::ref_slice;
use rustc::infer::{self, InferCtxt, InferOk, RegionVariableOrigin};
-use rustc::infer::type_variable::{self, TypeVariableOrigin};
+use rustc::infer::type_variable::{TypeVariableOrigin};
use rustc::ty::subst::{Kind, Subst, Substs};
-use rustc::traits::{self, ObligationCause, ObligationCauseCode, Reveal};
+use rustc::traits::{self, FulfillmentContext, ObligationCause, ObligationCauseCode, Reveal};
use rustc::ty::{ParamTy, ParameterEnvironment};
use rustc::ty::{LvaluePreference, NoPreference, PreferMutLvalue};
use rustc::ty::{self, Ty, TyCtxt, Visibility};
use rustc::ty::{MethodCall, MethodCallee};
-use rustc::ty::adjustment;
+use rustc::ty::adjustment::{Adjust, Adjustment, AutoBorrow};
use rustc::ty::fold::{BottomUpFolder, TypeFoldable};
use rustc::ty::maps::Providers;
use rustc::ty::util::{Representability, IntTypeExt};
use TypeAndSubsts;
use lint;
use util::common::{ErrorReported, indenter};
-use util::nodemap::{DefIdMap, FxHashMap, FxHashSet, NodeMap};
+use util::nodemap::{DefIdMap, FxHashMap, NodeMap};
use std::cell::{Cell, RefCell};
+use std::collections::hash_map::Entry;
use std::cmp;
use std::mem::replace;
use std::ops::{self, Deref};
def.destructor(tcx); // force the destructor to be evaluated
check_representable(tcx, span, def_id);
- if def.repr.simd {
+ if def.repr.simd() {
check_simd(tcx, span, def_id);
}
}
}
}
- pub fn write_autoderef_adjustment(&self,
+ pub fn apply_autoderef_adjustment(&self,
node_id: ast::NodeId,
derefs: usize,
adjusted_ty: Ty<'tcx>) {
- self.write_adjustment(node_id, adjustment::Adjustment {
- kind: adjustment::Adjust::DerefRef {
+ self.apply_adjustment(node_id, Adjustment {
+ kind: Adjust::DerefRef {
autoderefs: derefs,
autoref: None,
unsize: false
});
}
- pub fn write_adjustment(&self,
- node_id: ast::NodeId,
- adj: adjustment::Adjustment<'tcx>) {
- debug!("write_adjustment(node_id={}, adj={:?})", node_id, adj);
+ pub fn apply_adjustment(&self, node_id: ast::NodeId, adj: Adjustment<'tcx>) {
+ debug!("apply_adjustment(node_id={}, adj={:?})", node_id, adj);
if adj.is_identity() {
return;
}
- self.tables.borrow_mut().adjustments.insert(node_id, adj);
+ match self.tables.borrow_mut().adjustments.entry(node_id) {
+ Entry::Vacant(entry) => { entry.insert(adj); },
+ Entry::Occupied(mut entry) => {
+ debug!(" - composing on top of {:?}", entry.get());
+ let composed_kind = match (entry.get().kind, adj.kind) {
+ // Applying any adjustment on top of a NeverToAny
+ // is a valid NeverToAny adjustment, because it can't
+ // be reached.
+ (Adjust::NeverToAny, _) => Adjust::NeverToAny,
+ (Adjust::DerefRef {
+ autoderefs: 1,
+ autoref: Some(AutoBorrow::Ref(..)),
+ unsize: false
+ }, Adjust::DerefRef { autoderefs, .. }) if autoderefs > 0 => {
+ // A reborrow has no effect before a dereference.
+ adj.kind
+ }
+ // FIXME: currently we never try to compose autoderefs
+ // and ReifyFnPointer/UnsafeFnPointer, but we could.
+ _ =>
+ bug!("while adjusting {}, can't compose {:?} and {:?}",
+ node_id, entry.get(), adj)
+ };
+ *entry.get_mut() = Adjustment {
+ kind: composed_kind,
+ target: adj.target
+ };
+ }
+ }
}
/// Basically whenever we are converting from a type scheme into
}
}
+ // Implements type inference fallback algorithm
fn select_all_obligations_and_apply_defaults(&self) {
- if self.tcx.sess.features.borrow().default_type_parameter_fallback {
- self.new_select_all_obligations_and_apply_defaults();
- } else {
- self.old_select_all_obligations_and_apply_defaults();
- }
- }
-
- // Implements old type inference fallback algorithm
- fn old_select_all_obligations_and_apply_defaults(&self) {
self.select_obligations_where_possible();
self.default_type_parameters();
self.select_obligations_where_possible();
}
- fn new_select_all_obligations_and_apply_defaults(&self) {
- use rustc::ty::error::UnconstrainedNumeric::Neither;
- use rustc::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat};
-
- // For the time being this errs on the side of being memory wasteful but provides better
- // error reporting.
- // let type_variables = self.type_variables.clone();
-
- // There is a possibility that this algorithm will have to run an arbitrary number of times
- // to terminate so we bound it by the compiler's recursion limit.
- for _ in 0..self.tcx.sess.recursion_limit.get() {
- // First we try to solve all obligations, it is possible that the last iteration
- // has made it possible to make more progress.
- self.select_obligations_where_possible();
-
- let mut conflicts = Vec::new();
-
- // Collect all unsolved type, integral and floating point variables.
- let unsolved_variables = self.unsolved_variables();
-
- // We must collect the defaults *before* we do any unification. Because we have
- // directly attached defaults to the type variables any unification that occurs
- // will erase defaults causing conflicting defaults to be completely ignored.
- let default_map: FxHashMap<Ty<'tcx>, _> =
- unsolved_variables
- .iter()
- .filter_map(|t| self.default(t).map(|d| (*t, d)))
- .collect();
-
- let mut unbound_tyvars = FxHashSet();
-
- debug!("select_all_obligations_and_apply_defaults: defaults={:?}", default_map);
-
- // We loop over the unsolved variables, resolving them and if they are
- // and unconstrainted numeric type we add them to the set of unbound
- // variables. We do this so we only apply literal fallback to type
- // variables without defaults.
- for ty in &unsolved_variables {
- let resolved = self.resolve_type_vars_if_possible(ty);
- if self.type_var_diverges(resolved) {
- self.demand_eqtype(syntax_pos::DUMMY_SP, *ty,
- self.tcx.mk_diverging_default());
- } else {
- match self.type_is_unconstrained_numeric(resolved) {
- UnconstrainedInt | UnconstrainedFloat => {
- unbound_tyvars.insert(resolved);
- },
- Neither => {}
- }
- }
- }
-
- // We now remove any numeric types that also have defaults, and instead insert
- // the type variable with a defined fallback.
- for ty in &unsolved_variables {
- if let Some(_default) = default_map.get(ty) {
- let resolved = self.resolve_type_vars_if_possible(ty);
-
- debug!("select_all_obligations_and_apply_defaults: \
- ty: {:?} with default: {:?}",
- ty, _default);
-
- match resolved.sty {
- ty::TyInfer(ty::TyVar(_)) => {
- unbound_tyvars.insert(ty);
- }
-
- ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) => {
- unbound_tyvars.insert(ty);
- if unbound_tyvars.contains(resolved) {
- unbound_tyvars.remove(resolved);
- }
- }
-
- _ => {}
- }
- }
- }
-
- // If there are no more fallbacks to apply at this point we have applied all possible
- // defaults and type inference will proceed as normal.
- if unbound_tyvars.is_empty() {
- break;
- }
-
- // Finally we go through each of the unbound type variables and unify them with
- // the proper fallback, reporting a conflicting default error if any of the
- // unifications fail. We know it must be a conflicting default because the
- // variable would only be in `unbound_tyvars` and have a concrete value if
- // it had been solved by previously applying a default.
-
- // We wrap this in a transaction for error reporting, if we detect a conflict
- // we will rollback the inference context to its prior state so we can probe
- // for conflicts and correctly report them.
-
- let _ = self.commit_if_ok(|_: &infer::CombinedSnapshot| {
- conflicts.extend(
- self.apply_defaults_and_return_conflicts(&unbound_tyvars, &default_map, None)
- );
-
- // If there are conflicts we rollback, otherwise commit
- if conflicts.len() > 0 {
- Err(())
- } else {
- Ok(())
- }
- });
-
- // Loop through each conflicting default, figuring out the default that caused
- // a unification failure and then report an error for each.
- for (conflict, default) in conflicts {
- let conflicting_default =
- self.apply_defaults_and_return_conflicts(
- &unbound_tyvars,
- &default_map,
- Some(conflict)
- )
- .last()
- .map(|(_, tv)| tv)
- .unwrap_or(type_variable::Default {
- ty: self.next_ty_var(
- TypeVariableOrigin::MiscVariable(syntax_pos::DUMMY_SP)),
- origin_span: syntax_pos::DUMMY_SP,
- // what do I put here?
- def_id: self.tcx.hir.local_def_id(ast::CRATE_NODE_ID)
- });
-
- // This is to ensure that we elimnate any non-determinism from the error
- // reporting by fixing an order, it doesn't matter what order we choose
- // just that it is consistent.
- let (first_default, second_default) =
- if default.def_id < conflicting_default.def_id {
- (default, conflicting_default)
- } else {
- (conflicting_default, default)
- };
-
-
- self.report_conflicting_default_types(
- first_default.origin_span,
- self.body_id,
- first_default,
- second_default)
- }
- }
-
- self.select_obligations_where_possible();
- }
-
- // For use in error handling related to default type parameter fallback. We explicitly
- // apply the default that caused conflict first to a local version of the type variable
- // table then apply defaults until we find a conflict. That default must be the one
- // that caused conflict earlier.
- fn apply_defaults_and_return_conflicts<'b>(
- &'b self,
- unbound_vars: &'b FxHashSet<Ty<'tcx>>,
- default_map: &'b FxHashMap<Ty<'tcx>, type_variable::Default<'tcx>>,
- conflict: Option<Ty<'tcx>>,
- ) -> impl Iterator<Item=(Ty<'tcx>, type_variable::Default<'tcx>)> + 'b {
- use rustc::ty::error::UnconstrainedNumeric::Neither;
- use rustc::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat};
-
- conflict.into_iter().chain(unbound_vars.iter().cloned()).flat_map(move |ty| {
- if self.type_var_diverges(ty) {
- self.demand_eqtype(syntax_pos::DUMMY_SP, ty,
- self.tcx.mk_diverging_default());
- } else {
- match self.type_is_unconstrained_numeric(ty) {
- UnconstrainedInt => {
- self.demand_eqtype(syntax_pos::DUMMY_SP, ty, self.tcx.types.i32)
- },
- UnconstrainedFloat => {
- self.demand_eqtype(syntax_pos::DUMMY_SP, ty, self.tcx.types.f64)
- },
- Neither => {
- if let Some(default) = default_map.get(ty) {
- let default = default.clone();
- let default_ty = self.normalize_associated_types_in(
- default.origin_span, &default.ty);
- match self.eq_types(false,
- &self.misc(default.origin_span),
- ty,
- default_ty) {
- Ok(ok) => self.register_infer_ok_obligations(ok),
- Err(_) => {
- return Some((ty, default));
- }
- }
- }
- }
- }
- }
-
- None
- })
- }
-
fn select_all_obligations_or_error(&self) {
debug!("select_all_obligations_or_error");
debug!("try_index_step: success, using built-in indexing");
// If we had `[T; N]`, we should've caught it before unsizing to `[T]`.
assert!(!unsize);
- self.write_autoderef_adjustment(base_expr.id, autoderefs, adjusted_ty);
+ self.apply_autoderef_adjustment(base_expr.id, autoderefs, adjusted_ty);
return Some((tcx.types.usize, ty));
}
_ => {}
"expression with never type wound up being adjusted");
let adj_ty = self.next_diverging_ty_var(
TypeVariableOrigin::AdjustmentType(expr.span));
- self.write_adjustment(expr.id, adjustment::Adjustment {
- kind: adjustment::Adjust::NeverToAny,
+ self.apply_adjustment(expr.id, Adjustment {
+ kind: Adjust::NeverToAny,
target: adj_ty
});
ty = adj_ty;
// No argument expectations are produced if unification fails.
let origin = self.misc(call_span);
let ures = self.sub_types(false, &origin, formal_ret, ret_ty);
+
// FIXME(#15760) can't use try! here, FromError doesn't default
// to identity so the resulting type is not constrained.
match ures {
- Ok(ok) => self.register_infer_ok_obligations(ok),
- Err(e) => return Err(e),
+ Ok(ok) => {
+ // Process any obligations locally as much as
+ // we can. We don't care if some things turn
+ // out unconstrained or ambiguous, as we're
+ // just trying to get hints here.
+ let result = self.save_and_restore_obligations_in_snapshot_flag(|_| {
+ let mut fulfill = FulfillmentContext::new();
+ let ok = ok; // FIXME(#30046)
+ for obligation in ok.obligations {
+ fulfill.register_predicate_obligation(self, obligation);
+ }
+ fulfill.select_where_possible(self)
+ });
+
+ match result {
+ Ok(()) => { }
+ Err(_) => return Err(()),
+ }
+ }
+ Err(_) => return Err(()),
}
// Record all the argument types, with the substitutions
let field_ty = self.field_ty(expr.span, field, substs);
if self.tcx.vis_is_accessible_from(field.vis, self.body_id) {
autoderef.finalize(lvalue_pref, &[base]);
- self.write_autoderef_adjustment(base.id, autoderefs, base_t);
+ self.apply_autoderef_adjustment(base.id, autoderefs, base_t);
self.tcx.check_stability(field.did, expr.id, expr.span);
if let Some(field_ty) = field {
autoderef.finalize(lvalue_pref, &[base]);
- self.write_autoderef_adjustment(base.id, autoderefs, base_t);
+ self.apply_autoderef_adjustment(base.id, autoderefs, base_t);
return field_ty;
}
}
let def_id = tcx.hir.local_def_id(item_id);
match it.node {
// These don't define types.
- hir::ItemExternCrate(_) | hir::ItemUse(..) | hir::ItemMod(_) => {
- }
+ hir::ItemExternCrate(_) |
+ hir::ItemUse(..) |
+ hir::ItemMod(_) |
+ hir::ItemGlobalAsm(_) => {}
hir::ItemForeignMod(ref foreign_mod) => {
for item in &foreign_mod.items {
let def_id = tcx.hir.local_def_id(item.id);
tcx.item_generics(def_id);
tcx.item_type(def_id);
tcx.item_predicates(def_id);
- },
- _ => {
+ }
+ hir::ItemStatic(..) | hir::ItemConst(..) | hir::ItemFn(..) => {
tcx.item_generics(def_id);
tcx.item_type(def_id);
tcx.item_predicates(def_id);
- },
+ }
}
}
ItemTrait(..) |
ItemMod(..) |
ItemForeignMod(..) |
+ ItemGlobalAsm(..) |
ItemExternCrate(..) |
ItemUse(..) => {
span_bug!(
You hit this error because the compiler lacks the information to
determine the type of this variable. Erroneous code example:
-```compile_fail,E0102
+```compile_fail,E0282
// could be an array of anything
let x = []; // error: cannot determine a type for this local variable
```
hir::ItemFn(..) |
hir::ItemMod(..) |
hir::ItemForeignMod(..) |
+ hir::ItemGlobalAsm(..) |
hir::ItemTy(..) |
hir::ItemImpl(..) |
hir::ItemDefaultImpl(..) => {}
hir::ItemFn(..) |
hir::ItemMod(..) |
hir::ItemForeignMod(..) |
+ hir::ItemGlobalAsm(..) |
hir::ItemTy(..) => {}
}
}
authors = ["The Rust Project Developers"]
name = "rustdoc"
version = "0.0.0"
+build = "build.rs"
[lib]
name = "rustdoc"
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern crate build_helper;
+extern crate gcc;
+
+fn main() {
+ let src_dir = std::path::Path::new("../rt/hoedown/src");
+ build_helper::rerun_if_changed_anything_in_dir(src_dir);
+ let mut cfg = gcc::Config::new();
+ cfg.file("../rt/hoedown/src/autolink.c")
+ .file("../rt/hoedown/src/buffer.c")
+ .file("../rt/hoedown/src/document.c")
+ .file("../rt/hoedown/src/escape.c")
+ .file("../rt/hoedown/src/html.c")
+ .file("../rt/hoedown/src/html_blocks.c")
+ .file("../rt/hoedown/src/html_smartypants.c")
+ .file("../rt/hoedown/src/stack.c")
+ .file("../rt/hoedown/src/version.c")
+ .include(src_dir)
+ .compile("libhoedown.a");
+}
+
self.type_() == ItemType::Struct
}
pub fn is_enum(&self) -> bool {
- self.type_() == ItemType::Module
+ self.type_() == ItemType::Enum
}
pub fn is_fn(&self) -> bool {
self.type_() == ItemType::Function
pub fn is_primitive(&self) -> bool {
self.type_() == ItemType::Primitive
}
+ pub fn is_union(&self) -> bool {
+ self.type_() == ItemType::Union
+ }
pub fn is_stripped(&self) -> bool {
match self.inner { StrippedItem(..) => true, _ => false }
}
pub enum WherePredicate {
BoundPredicate { ty: Type, bounds: Vec<TyParamBound> },
RegionPredicate { lifetime: Lifetime, bounds: Vec<Lifetime>},
- EqPredicate { lhs: Type, rhs: Type }
+ EqPredicate { lhs: Type, rhs: Type },
}
impl Clean<WherePredicate> for hir::WherePredicate {
match *self {
Predicate::Trait(ref pred) => pred.clean(cx),
Predicate::Equate(ref pred) => pred.clean(cx),
+ Predicate::Subtype(ref pred) => pred.clean(cx),
Predicate::RegionOutlives(ref pred) => pred.clean(cx),
Predicate::TypeOutlives(ref pred) => pred.clean(cx),
Predicate::Projection(ref pred) => pred.clean(cx),
}
}
+impl<'tcx> Clean<WherePredicate> for ty::SubtypePredicate<'tcx> {
+ fn clean(&self, _cx: &DocContext) -> WherePredicate {
+ panic!("subtype predicates are an internal rustc artifact \
+ and should not be seen by rustdoc")
+ }
+}
+
impl<'tcx> Clean<WherePredicate> for ty::OutlivesPredicate<&'tcx ty::Region, &'tcx ty::Region> {
fn clean(&self, cx: &DocContext) -> WherePredicate {
let ty::OutlivesPredicate(ref a, ref b) = *self;
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug)]
pub struct PathSegment {
pub name: String,
- pub params: PathParameters
+ pub params: PathParameters,
}
impl Clean<PathSegment> for hir::PathSegment {
/// rendering function with the necessary arguments for linking to a local path.
fn resolved_path(w: &mut fmt::Formatter, did: DefId, path: &clean::Path,
print_all: bool, use_absolute: bool, is_not_debug: bool) -> fmt::Result {
- let last = path.segments.last().unwrap();
- let rel_root = match &*path.segments[0].name {
- "self" => Some("./".to_string()),
- _ => None,
+ let empty = clean::PathSegment {
+ name: String::new(),
+ params: clean::PathParameters::Parenthesized {
+ inputs: Vec::new(),
+ output: None,
+ }
+ };
+ let last = path.segments.last()
+ .unwrap_or(&empty);
+ let rel_root = if path.segments.is_empty() {
+ None
+ } else {
+ match &*path.segments[0].name {
+ "self" => Some("./".to_string()),
+ _ => None,
+ }
};
if print_all {
root.push_str(&seg.name);
root.push_str("/");
if is_not_debug {
- write!(w, "<a class=\"mod\"
- href=\"{}index.html\">{}</a>::",
- root,
- seg.name)?;
+ write!(w, "<a class=\"mod\" href=\"{}index.html\">{}</a>::",
+ root,
+ seg.name)?;
} else {
write!(w, "{}::", seg.name)?;
}
match href(did) {
Some((_, _, fqp)) => format!("{}::{}",
fqp[..fqp.len()-1].join("::"),
- HRef::new(did, fqp.last().unwrap())),
+ HRef::new(did, fqp.last()
+ .unwrap_or(&String::new()))),
None => format!("{}", HRef::new(did, &last.name)),
}
} else {
match href(did) {
Some((_, _, fqp)) => format!("{:?}::{:?}",
fqp[..fqp.len()-1].join("::"),
- HRef::new(did, fqp.last().unwrap())),
+ HRef::new(did, fqp.last()
+ .unwrap_or(&String::new()))),
None => format!("{:?}", HRef::new(did, &last.name)),
}
} else {
}
Ok(())
}
- // It's pretty unsightly to look at `<A as B>::C` in output, and
- // we've got hyperlinking on our side, so try to avoid longer
- // notation as much as possible by making `C` a hyperlink to trait
- // `B` to disambiguate.
- //
- // FIXME: this is still a lossy conversion and there should probably
- // be a better way of representing this in general? Most of
- // the ugliness comes from inlining across crates where
- // everything comes in as a fully resolved QPath (hard to
- // look at).
- clean::QPath {
- ref name,
- ref self_type,
- trait_: box clean::ResolvedPath { did, ref typarams, .. },
- } => {
- if f.alternate() {
- write!(f, "{:#}::", self_type)?;
- } else {
- write!(f, "{}::", self_type)?;
- }
- let path = clean::Path::singleton(name.clone());
- resolved_path(f, did, &path, true, use_absolute, is_not_debug)?;
-
- // FIXME: `typarams` are not rendered, and this seems bad?
- drop(typarams);
- Ok(())
- }
clean::QPath { ref name, ref self_type, ref trait_ } => {
+ let should_show_cast = match *trait_ {
+ box clean::ResolvedPath { .. } => {
+ let path = clean::Path::singleton(name.clone());
+ !path.segments.is_empty() && &format!("{:#}", trait_) != "()" &&
+ &format!("{:#}", self_type) != "Self"
+ }
+ _ => true,
+ };
if f.alternate() {
if is_not_debug {
- write!(f, "<{:#} as {:#}>::{}", self_type, trait_, name)
+ if should_show_cast {
+ write!(f, "<{:#} as {:#}>::", self_type, trait_)?
+ } else {
+ write!(f, "{:#}::", self_type)?
+ }
} else {
- write!(f, "<{:#?} as {:#?}>::{}", self_type, trait_, name)
+ if should_show_cast {
+ write!(f, "<{:#?} as {:#?}>::", self_type, trait_)?
+ } else {
+ write!(f, "{:#?}::", self_type)?
+ }
}
} else {
if is_not_debug {
- write!(f, "<{} as {}>::{}", self_type, trait_, name)
+ if should_show_cast {
+ write!(f, "<{} as {}>::", self_type, trait_)?
+ } else {
+ write!(f, "{}::", self_type)?
+ }
} else {
- write!(f, "<{:?} as {:?}>::{}", self_type, trait_, name)
+ if should_show_cast {
+ write!(f, "<{:?} as {:?}>::", self_type, trait_)?
+ } else {
+ write!(f, "{:?}::", self_type)?
+ }
+ }
+ };
+ match *trait_ {
+ // It's pretty unsightly to look at `<A as B>::C` in output, and
+ // we've got hyperlinking on our side, so try to avoid longer
+ // notation as much as possible by making `C` a hyperlink to trait
+ // `B` to disambiguate.
+ //
+ // FIXME: this is still a lossy conversion and there should probably
+ // be a better way of representing this in general? Most of
+ // the ugliness comes from inlining across crates where
+ // everything comes in as a fully resolved QPath (hard to
+ // look at).
+ box clean::ResolvedPath { did, ref typarams, .. } => {
+ let path = clean::Path::singleton(name.clone());
+ resolved_path(f, did, &path, true, use_absolute, is_not_debug)?;
+
+ // FIXME: `typarams` are not rendered, and this seems bad?
+ drop(typarams);
+ Ok(())
+ }
+ _ => {
+ write!(f, "{}", name)
}
}
}
#![allow(non_camel_case_types)]
+use libc;
+use std::slice;
+
use std::ascii::AsciiExt;
use std::cell::RefCell;
use std::collections::{HashMap, VecDeque};
}
}
+const DEF_OUNIT: libc::size_t = 64;
+const HOEDOWN_EXT_NO_INTRA_EMPHASIS: libc::c_uint = 1 << 11;
+const HOEDOWN_EXT_TABLES: libc::c_uint = 1 << 0;
+const HOEDOWN_EXT_FENCED_CODE: libc::c_uint = 1 << 1;
+const HOEDOWN_EXT_AUTOLINK: libc::c_uint = 1 << 3;
+const HOEDOWN_EXT_STRIKETHROUGH: libc::c_uint = 1 << 4;
+const HOEDOWN_EXT_SUPERSCRIPT: libc::c_uint = 1 << 8;
+const HOEDOWN_EXT_FOOTNOTES: libc::c_uint = 1 << 2;
+
+const HOEDOWN_EXTENSIONS: libc::c_uint =
+ HOEDOWN_EXT_NO_INTRA_EMPHASIS | HOEDOWN_EXT_TABLES |
+ HOEDOWN_EXT_FENCED_CODE | HOEDOWN_EXT_AUTOLINK |
+ HOEDOWN_EXT_STRIKETHROUGH | HOEDOWN_EXT_SUPERSCRIPT |
+ HOEDOWN_EXT_FOOTNOTES;
+
+enum hoedown_document {}
+
+type blockcodefn = extern "C" fn(*mut hoedown_buffer, *const hoedown_buffer,
+ *const hoedown_buffer, *const hoedown_renderer_data,
+ libc::size_t);
+
+type blockquotefn = extern "C" fn(*mut hoedown_buffer, *const hoedown_buffer,
+ *const hoedown_renderer_data, libc::size_t);
+
+type headerfn = extern "C" fn(*mut hoedown_buffer, *const hoedown_buffer,
+ libc::c_int, *const hoedown_renderer_data,
+ libc::size_t);
+
+type blockhtmlfn = extern "C" fn(*mut hoedown_buffer, *const hoedown_buffer,
+ *const hoedown_renderer_data, libc::size_t);
+
+type codespanfn = extern "C" fn(*mut hoedown_buffer, *const hoedown_buffer,
+ *const hoedown_renderer_data, libc::size_t) -> libc::c_int;
+
+type linkfn = extern "C" fn (*mut hoedown_buffer, *const hoedown_buffer,
+ *const hoedown_buffer, *const hoedown_buffer,
+ *const hoedown_renderer_data, libc::size_t) -> libc::c_int;
+
+type entityfn = extern "C" fn (*mut hoedown_buffer, *const hoedown_buffer,
+ *const hoedown_renderer_data, libc::size_t);
+
+type normaltextfn = extern "C" fn(*mut hoedown_buffer, *const hoedown_buffer,
+ *const hoedown_renderer_data, libc::size_t);
+
+#[repr(C)]
+struct hoedown_renderer_data {
+ opaque: *mut libc::c_void,
+}
+
+#[repr(C)]
+struct hoedown_renderer {
+ opaque: *mut libc::c_void,
+
+ blockcode: Option<blockcodefn>,
+ blockquote: Option<blockquotefn>,
+ header: Option<headerfn>,
+
+ other_block_level_callbacks: [libc::size_t; 11],
+
+ blockhtml: Option<blockhtmlfn>,
+
+ /* span level callbacks - NULL or return 0 prints the span verbatim */
+ autolink: libc::size_t, // unused
+ codespan: Option<codespanfn>,
+ other_span_level_callbacks_1: [libc::size_t; 7],
+ link: Option<linkfn>,
+ other_span_level_callbacks_2: [libc::size_t; 6],
+
+ /* low level callbacks - NULL copies input directly into the output */
+ entity: Option<entityfn>,
+ normal_text: Option<normaltextfn>,
+
+ /* header and footer */
+ other_callbacks: [libc::size_t; 2],
+}
+
+#[repr(C)]
+struct hoedown_html_renderer_state {
+ opaque: *mut libc::c_void,
+ toc_data: html_toc_data,
+ flags: libc::c_uint,
+ link_attributes: Option<extern "C" fn(*mut hoedown_buffer,
+ *const hoedown_buffer,
+ *const hoedown_renderer_data)>,
+}
+
+#[repr(C)]
+struct html_toc_data {
+ header_count: libc::c_int,
+ current_level: libc::c_int,
+ level_offset: libc::c_int,
+ nesting_level: libc::c_int,
+}
+
+#[repr(C)]
+struct hoedown_buffer {
+ data: *const u8,
+ size: libc::size_t,
+ asize: libc::size_t,
+ unit: libc::size_t,
+}
+
+extern {
+ fn hoedown_html_renderer_new(render_flags: libc::c_uint,
+ nesting_level: libc::c_int)
+ -> *mut hoedown_renderer;
+ fn hoedown_html_renderer_free(renderer: *mut hoedown_renderer);
+
+ fn hoedown_document_new(rndr: *const hoedown_renderer,
+ extensions: libc::c_uint,
+ max_nesting: libc::size_t) -> *mut hoedown_document;
+ fn hoedown_document_render(doc: *mut hoedown_document,
+ ob: *mut hoedown_buffer,
+ document: *const u8,
+ doc_size: libc::size_t);
+ fn hoedown_document_free(md: *mut hoedown_document);
+
+ fn hoedown_buffer_new(unit: libc::size_t) -> *mut hoedown_buffer;
+ fn hoedown_buffer_free(b: *mut hoedown_buffer);
+}
+
+impl hoedown_buffer {
+ fn as_bytes(&self) -> &[u8] {
+ unsafe { slice::from_raw_parts(self.data, self.size as usize) }
+ }
+}
+
+pub fn old_find_testable_code(doc: &str, tests: &mut ::test::Collector, position: Span) {
+ extern fn block(_ob: *mut hoedown_buffer,
+ text: *const hoedown_buffer,
+ lang: *const hoedown_buffer,
+ data: *const hoedown_renderer_data,
+ line: libc::size_t) {
+ unsafe {
+ if text.is_null() { return }
+ let block_info = if lang.is_null() {
+ LangString::all_false()
+ } else {
+ let lang = (*lang).as_bytes();
+ let s = str::from_utf8(lang).unwrap();
+ LangString::parse(s)
+ };
+ if !block_info.rust { return }
+ let opaque = (*data).opaque as *mut hoedown_html_renderer_state;
+ let tests = &mut *((*opaque).opaque as *mut ::test::Collector);
+ let line = tests.get_line() + line;
+ let filename = tests.get_filename();
+ tests.add_old_test(line, filename);
+ }
+ }
+
+ extern fn header(_ob: *mut hoedown_buffer,
+ text: *const hoedown_buffer,
+ level: libc::c_int, data: *const hoedown_renderer_data,
+ _: libc::size_t) {
+ unsafe {
+ let opaque = (*data).opaque as *mut hoedown_html_renderer_state;
+ let tests = &mut *((*opaque).opaque as *mut ::test::Collector);
+ if text.is_null() {
+ tests.register_header("", level as u32);
+ } else {
+ let text = (*text).as_bytes();
+ let text = str::from_utf8(text).unwrap();
+ tests.register_header(text, level as u32);
+ }
+ }
+ }
+
+ tests.set_position(position);
+
+ unsafe {
+ let ob = hoedown_buffer_new(DEF_OUNIT);
+ let renderer = hoedown_html_renderer_new(0, 0);
+ (*renderer).blockcode = Some(block);
+ (*renderer).header = Some(header);
+ (*((*renderer).opaque as *mut hoedown_html_renderer_state)).opaque
+ = tests as *mut _ as *mut libc::c_void;
+
+ let document = hoedown_document_new(renderer, HOEDOWN_EXTENSIONS, 16);
+ hoedown_document_render(document, ob, doc.as_ptr(),
+ doc.len() as libc::size_t);
+ hoedown_document_free(document);
+
+ hoedown_html_renderer_free(renderer);
+ hoedown_buffer_free(ob);
+ }
+}
+
pub fn find_testable_code(doc: &str, tests: &mut ::test::Collector, position: Span) {
tests.set_position(position);
);
for token in tokens {
- match token {
+ match token.trim() {
"" => {},
- "should_panic" => { data.should_panic = true; seen_rust_tags = true; },
- "no_run" => { data.no_run = true; seen_rust_tags = true; },
- "ignore" => { data.ignore = true; seen_rust_tags = true; },
- "rust" => { data.rust = true; seen_rust_tags = true; },
- "test_harness" => { data.test_harness = true; seen_rust_tags = true; },
+ "should_panic" => {
+ data.should_panic = true;
+ seen_rust_tags = !seen_other_tags;
+ }
+ "no_run" => { data.no_run = true; seen_rust_tags = !seen_other_tags; }
+ "ignore" => { data.ignore = true; seen_rust_tags = !seen_other_tags; }
+ "rust" => { data.rust = true; seen_rust_tags = true; }
+ "test_harness" => {
+ data.test_harness = true;
+ seen_rust_tags = !seen_other_tags || seen_rust_tags;
+ }
"compile_fail" if allow_compile_fail => {
data.compile_fail = true;
- seen_rust_tags = true;
+ seen_rust_tags = !seen_other_tags || seen_rust_tags;
data.no_run = true;
}
x if allow_error_code_check && x.starts_with("E") && x.len() == 5 => {
if let Ok(_) = x[1..].parse::<u32>() {
data.error_codes.push(x.to_owned());
- seen_rust_tags = true;
+ seen_rust_tags = !seen_other_tags || seen_rust_tags;
} else {
seen_other_tags = true;
}
t("test_harness", false, false, false, true, true, false, Vec::new());
t("compile_fail", false, true, false, true, false, true, Vec::new());
t("{.no_run .example}", false, true, false, true, false, false, Vec::new());
- t("{.sh .should_panic}", true, false, false, true, false, false, Vec::new());
+ t("{.sh .should_panic}", true, false, false, false, false, false, Vec::new());
t("{.example .rust}", false, false, false, true, false, false, Vec::new());
t("{.test_harness .rust}", false, false, false, true, true, false, Vec::new());
+ t("text, no_run", false, true, false, false, false, false, Vec::new());
+ t("text,no_run", false, true, false, false, false, false, Vec::new());
}
#[test]
}).peekable();
if let doctree::Plain = s.struct_type {
if fields.peek().is_some() {
- write!(w, "<h2 class='fields'>Fields</h2>")?;
+ write!(w, "<h2 id='fields' class='fields'>Fields</h2>")?;
for (field, ty) in fields {
let id = derive_id(format!("{}.{}",
ItemType::StructField,
}
}).peekable();
if fields.peek().is_some() {
- write!(w, "<h2 class='fields'>Fields</h2>")?;
+ write!(w, "<h2 id='fields' class='fields'>Fields</h2>")?;
for (field, ty) in fields {
write!(w, "<span id='{shortty}.{name}' class=\"{shortty}\"><code>{name}: {ty}</code>
</span>",
document(w, cx, it)?;
if !e.variants.is_empty() {
- write!(w, "<h2 class='variants'>Variants</h2>\n")?;
+ write!(w, "<h2 id='variants' class='variants'>Variants</h2>\n")?;
for variant in &e.variants {
let id = derive_id(format!("{}.{}",
ItemType::Variant,
let it = self.item;
let parentlen = cx.current.len() - if it.is_mod() {1} else {0};
+ if it.is_struct() || it.is_trait() || it.is_primitive() || it.is_union()
+ || it.is_enum() || it.is_mod()
+ {
+ write!(fmt, "<p class='location'>")?;
+ match it.inner {
+ clean::StructItem(..) => write!(fmt, "Struct ")?,
+ clean::TraitItem(..) => write!(fmt, "Trait ")?,
+ clean::PrimitiveItem(..) => write!(fmt, "Primitive Type ")?,
+ clean::UnionItem(..) => write!(fmt, "Union ")?,
+ clean::EnumItem(..) => write!(fmt, "Enum ")?,
+ clean::ModuleItem(..) => if it.is_crate() {
+ write!(fmt, "Crate ")?;
+ } else {
+ write!(fmt, "Module ")?;
+ },
+ _ => (),
+ }
+ write!(fmt, "{}", it.name.as_ref().unwrap())?;
+ write!(fmt, "</p>")?;
+
+ match it.inner {
+ clean::StructItem(ref s) => sidebar_struct(fmt, it, s)?,
+ clean::TraitItem(ref t) => sidebar_trait(fmt, it, t)?,
+ clean::PrimitiveItem(ref p) => sidebar_primitive(fmt, it, p)?,
+ clean::UnionItem(ref u) => sidebar_union(fmt, it, u)?,
+ clean::EnumItem(ref e) => sidebar_enum(fmt, it, e)?,
+ clean::ModuleItem(ref m) => sidebar_module(fmt, it, &m.items)?,
+ _ => (),
+ }
+ }
+
// The sidebar is designed to display sibling functions, modules and
// other miscellaneous information. since there are lots of sibling
// items (and that causes quadratic growth in large modules),
}
}
+fn sidebar_assoc_items(it: &clean::Item) -> String {
+ let mut out = String::new();
+ let c = cache();
+ if let Some(v) = c.impls.get(&it.def_id) {
+ if v.iter().any(|i| i.inner_impl().trait_.is_none()) {
+ out.push_str("<li><a href=\"#methods\">Methods</a></li>");
+ }
+
+ if v.iter().any(|i| i.inner_impl().trait_.is_some()) {
+ if let Some(impl_) = v.iter()
+ .filter(|i| i.inner_impl().trait_.is_some())
+ .find(|i| i.inner_impl().trait_.def_id() == c.deref_trait_did) {
+ if let Some(target) = impl_.inner_impl().items.iter().filter_map(|item| {
+ match item.inner {
+ clean::TypedefItem(ref t, true) => Some(&t.type_),
+ _ => None,
+ }
+ }).next() {
+ let inner_impl = target.def_id().or(target.primitive_type().and_then(|prim| {
+ c.primitive_locations.get(&prim).cloned()
+ })).and_then(|did| c.impls.get(&did));
+ if inner_impl.is_some() {
+ out.push_str("<li><a href=\"#deref-methods\">");
+ out.push_str(&format!("Methods from {:#}<Target={:#}>",
+ impl_.inner_impl().trait_.as_ref().unwrap(),
+ target));
+ out.push_str("</a></li>");
+ }
+ }
+ }
+ out.push_str("<li><a href=\"#implementations\">Trait Implementations</a></li>");
+ }
+ }
+
+ out
+}
+
+fn sidebar_struct(fmt: &mut fmt::Formatter, it: &clean::Item,
+ s: &clean::Struct) -> fmt::Result {
+ let mut sidebar = String::new();
+
+ if s.fields.iter()
+ .any(|f| if let clean::StructFieldItem(..) = f.inner { true } else { false }) {
+ if let doctree::Plain = s.struct_type {
+ sidebar.push_str("<li><a href=\"#fields\">Fields</a></li>");
+ }
+ }
+
+ sidebar.push_str(&sidebar_assoc_items(it));
+
+ if !sidebar.is_empty() {
+ write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?;
+ }
+ Ok(())
+}
+
+fn sidebar_trait(fmt: &mut fmt::Formatter, it: &clean::Item,
+ t: &clean::Trait) -> fmt::Result {
+ let mut sidebar = String::new();
+
+ let has_types = t.items.iter().any(|m| m.is_associated_type());
+ let has_consts = t.items.iter().any(|m| m.is_associated_const());
+ let has_required = t.items.iter().any(|m| m.is_ty_method());
+ let has_provided = t.items.iter().any(|m| m.is_method());
+
+ if has_types {
+ sidebar.push_str("<li><a href=\"#associated-types\">Associated Types</a></li>");
+ }
+ if has_consts {
+ sidebar.push_str("<li><a href=\"#associated-const\">Associated Constants</a></li>");
+ }
+ if has_required {
+ sidebar.push_str("<li><a href=\"#required-methods\">Required Methods</a></li>");
+ }
+ if has_provided {
+ sidebar.push_str("<li><a href=\"#provided-methods\">Provided Methods</a></li>");
+ }
+
+ sidebar.push_str(&sidebar_assoc_items(it));
+
+ sidebar.push_str("<li><a href=\"#implementors\">Implementors</a></li>");
+
+ write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)
+}
+
+fn sidebar_primitive(fmt: &mut fmt::Formatter, it: &clean::Item,
+ _p: &clean::PrimitiveType) -> fmt::Result {
+ let sidebar = sidebar_assoc_items(it);
+
+ if !sidebar.is_empty() {
+ write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?;
+ }
+ Ok(())
+}
+
+fn sidebar_union(fmt: &mut fmt::Formatter, it: &clean::Item,
+ u: &clean::Union) -> fmt::Result {
+ let mut sidebar = String::new();
+
+ if u.fields.iter()
+ .any(|f| if let clean::StructFieldItem(..) = f.inner { true } else { false }) {
+ sidebar.push_str("<li><a href=\"#fields\">Fields</a></li>");
+ }
+
+ sidebar.push_str(&sidebar_assoc_items(it));
+
+ if !sidebar.is_empty() {
+ write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?;
+ }
+ Ok(())
+}
+
+fn sidebar_enum(fmt: &mut fmt::Formatter, it: &clean::Item,
+ e: &clean::Enum) -> fmt::Result {
+ let mut sidebar = String::new();
+
+ if !e.variants.is_empty() {
+ sidebar.push_str("<li><a href=\"#variants\">Variants</a></li>");
+ }
+
+ sidebar.push_str(&sidebar_assoc_items(it));
+
+ if !sidebar.is_empty() {
+ write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?;
+ }
+ Ok(())
+}
+
+fn sidebar_module(fmt: &mut fmt::Formatter, _it: &clean::Item,
+ items: &[clean::Item]) -> fmt::Result {
+ let mut sidebar = String::new();
+
+ if items.iter().any(|it| it.type_() == ItemType::ExternCrate ||
+ it.type_() == ItemType::Import) {
+ sidebar.push_str(&format!("<li><a href=\"#{id}\">{name}</a></li>",
+ id = "reexports",
+ name = "Reexports"));
+ }
+
+ // ordering taken from item_module's `reorder`, which prioritizes element
+ // types in a fixed order when printing its headings
+ for &myty in &[ItemType::Primitive, ItemType::Module, ItemType::Macro, ItemType::Struct,
+ ItemType::Enum, ItemType::Constant, ItemType::Static, ItemType::Trait,
+ ItemType::Function, ItemType::Typedef, ItemType::Union, ItemType::Impl,
+ ItemType::TyMethod, ItemType::Method, ItemType::StructField, ItemType::Variant,
+ ItemType::AssociatedType, ItemType::AssociatedConst] {
+ if items.iter().any(|it| {
+ if let clean::DefaultImplItem(..) = it.inner {
+ false
+ } else {
+ !maybe_ignore_item(it) && !it.is_stripped() && it.type_() == myty
+ }
+ }) {
+ let (short, name) = match myty {
+ ItemType::ExternCrate |
+ ItemType::Import => ("reexports", "Reexports"),
+ ItemType::Module => ("modules", "Modules"),
+ ItemType::Struct => ("structs", "Structs"),
+ ItemType::Union => ("unions", "Unions"),
+ ItemType::Enum => ("enums", "Enums"),
+ ItemType::Function => ("functions", "Functions"),
+ ItemType::Typedef => ("types", "Type Definitions"),
+ ItemType::Static => ("statics", "Statics"),
+ ItemType::Constant => ("constants", "Constants"),
+ ItemType::Trait => ("traits", "Traits"),
+ ItemType::Impl => ("impls", "Implementations"),
+ ItemType::TyMethod => ("tymethods", "Type Methods"),
+ ItemType::Method => ("methods", "Methods"),
+ ItemType::StructField => ("fields", "Struct Fields"),
+ ItemType::Variant => ("variants", "Variants"),
+ ItemType::Macro => ("macros", "Macros"),
+ ItemType::Primitive => ("primitives", "Primitive Types"),
+ ItemType::AssociatedType => ("associated-types", "Associated Types"),
+ ItemType::AssociatedConst => ("associated-consts", "Associated Constants"),
+ };
+ sidebar.push_str(&format!("<li><a href=\"#{id}\">{name}</a></li>",
+ id = short,
+ name = name));
+ }
+ }
+
+ if !sidebar.is_empty() {
+ write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?;
+ }
+ Ok(())
+}
+
impl<'a> fmt::Display for Source<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let Source(s) = *self;
use html::render::reset_ids;
use html::escape::Escape;
use html::markdown;
-use html::markdown::{Markdown, MarkdownWithToc, find_testable_code};
+use html::markdown::{Markdown, MarkdownWithToc, find_testable_code, old_find_testable_code};
use test::{TestOptions, Collector};
/// Separate any lines at the start of the file that begin with `# ` or `%`.
let mut collector = Collector::new(input.to_string(), cfgs, libs, externs,
true, opts, maybe_sysroot, None,
Some(input.to_owned()));
+ old_find_testable_code(&input_str, &mut collector, DUMMY_SP);
find_testable_code(&input_str, &mut collector, DUMMY_SP);
test_args.insert(0, "rustdoctest".to_string());
testing::test_main(&test_args, collector.tests);
pub struct Collector {
pub tests: Vec<testing::TestDescAndFn>,
+    // to be removed when hoedown is definitely gone
+ pub old_tests: Vec<String>,
names: Vec<String>,
cfgs: Vec<String>,
libs: SearchPaths,
codemap: Option<Rc<CodeMap>>, filename: Option<String>) -> Collector {
Collector {
tests: Vec::new(),
+ old_tests: Vec::new(),
names: Vec::new(),
cfgs: cfgs,
libs: libs,
}
}
- pub fn add_test(&mut self, test: String,
- should_panic: bool, no_run: bool, should_ignore: bool,
- as_test_harness: bool, compile_fail: bool, error_codes: Vec<String>,
- line: usize, filename: String) {
- let name = if self.use_headers {
+ fn generate_name(&self, line: usize, filename: &str) -> String {
+ if self.use_headers {
if let Some(ref header) = self.current_header {
format!("{} - {} (line {})", filename, header, line)
} else {
}
} else {
format!("{} - {} (line {})", filename, self.names.join("::"), line)
- };
+ }
+ }
+
+    /// Registers a doctest code block found by the old (hoedown) markdown
+    /// parser. Only the generated test name is recorded, so `add_test` can
+    /// warn about blocks the old parser did not pick up.
+    // To be removed when hoedown is definitely gone.
+    pub fn add_old_test(&mut self, line: usize, filename: String) {
+        let name = self.generate_name(line, &filename);
+        self.old_tests.push(name);
+    }
+
+ pub fn add_test(&mut self, test: String,
+ should_panic: bool, no_run: bool, should_ignore: bool,
+ as_test_harness: bool, compile_fail: bool, error_codes: Vec<String>,
+ line: usize, filename: String) {
+ let name = self.generate_name(line, &filename);
+ if self.old_tests.iter().find(|&x| x == &name).is_none() {
+ let _ = writeln!(&mut io::stderr(),
+ "WARNING: {} Code block is not currently run as a test, but will in \
+ future versions of rustdoc. Please ensure this code block is a \
+ runnable test, or use the `ignore` directive.",
+ name);
+ return
+ }
let cfgs = self.cfgs.clone();
let libs = self.libs.clone();
let externs = self.externs.clone();
attrs.unindent_doc_comments();
if let Some(doc) = attrs.doc_value() {
self.collector.cnt = 0;
+ markdown::old_find_testable_code(doc, self.collector,
+ attrs.span.unwrap_or(DUMMY_SP));
markdown::find_testable_code(doc, self.collector,
attrs.span.unwrap_or(DUMMY_SP));
}
}
// If we're inlining, skip private items.
_ if self.inlining && item.vis != hir::Public => {}
+ hir::ItemGlobalAsm(..) => {}
hir::ItemExternCrate(ref p) => {
let cstore = &self.cx.sess().cstore;
om.extern_crates.push(ExternCrate {
use rustc::hir::def::Def;
use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId};
use rustc::ty::Visibility;
+use rustc::util::nodemap::FxHashSet;
use std::cell::RefMut;
access_levels: RefMut<'a, AccessLevels<DefId>>,
// Previous accessibility level, None means unreachable
prev_level: Option<AccessLevel>,
+ // Keeps track of already visited modules, in case a module re-exports its parent
+ visited_mods: FxHashSet<DefId>,
}
impl<'a, 'b, 'tcx> LibEmbargoVisitor<'a, 'b, 'tcx> {
cstore: &*cx.sess().cstore,
access_levels: cx.access_levels.borrow_mut(),
prev_level: Some(AccessLevel::Public),
+ visited_mods: FxHashSet()
}
}
}
pub fn visit_mod(&mut self, def_id: DefId) {
+ if !self.visited_mods.insert(def_id) {
+ return;
+ }
+
for item in self.cstore.item_children(def_id) {
self.visit_item(item.def);
}
// 1. Alfredo Viola (2005). Distributional analysis of Robin Hood linear probing
// hashing with buckets.
-/// A hash map implementation which uses linear probing with Robin Hood bucket
-/// stealing.
+/// A hash map implemented with linear probing and Robin Hood bucket stealing.
///
/// By default, `HashMap` uses a hashing algorithm selected to provide
/// resistance against HashDoS attacks. The algorithm is randomly seeded, and a
/// attacks such as HashDoS.
///
/// The hashing algorithm can be replaced on a per-`HashMap` basis using the
-/// [`HashMap::default`], [`HashMap::with_hasher`], and
-/// [`HashMap::with_capacity_and_hasher`] methods. Many alternative algorithms
-/// are available on crates.io, such as the [`fnv`] crate.
+/// [`default`], [`with_hasher`], and [`with_capacity_and_hasher`] methods. Many
+/// alternative algorithms are available on crates.io, such as the [`fnv`] crate.
///
/// It is required that the keys implement the [`Eq`] and [`Hash`] traits, although
/// this can frequently be achieved by using `#[derive(PartialEq, Eq, Hash)]`.
/// [`PartialEq`]: ../../std/cmp/trait.PartialEq.html
/// [`RefCell`]: ../../std/cell/struct.RefCell.html
/// [`Cell`]: ../../std/cell/struct.Cell.html
-/// [`HashMap::default`]: #method.default
-/// [`HashMap::with_hasher`]: #method.with_hasher
-/// [`HashMap::with_capacity_and_hasher`]: #method.with_capacity_and_hasher
+/// [`default`]: #method.default
+/// [`with_hasher`]: #method.with_hasher
+/// [`with_capacity_and_hasher`]: #method.with_capacity_and_hasher
/// [`fnv`]: https://crates.io/crates/fnv
///
/// ```
/// }
/// ```
///
-/// A HashMap with fixed list of elements can be initialized from an array:
+/// A `HashMap` with fixed list of elements can be initialized from an array:
///
/// ```
/// use std::collections::HashMap;
}
}
- /// Creates an empty `HashMap` with the specified capacity, using `hasher`
+ /// Creates an empty `HashMap` with the specified capacity, using `hash_builder`
/// to hash the keys.
///
/// The hash map will be able to hold at least `capacity` elements without
/// reallocating. If `capacity` is 0, the hash map will not allocate.
- /// Warning: `hasher` is normally randomly generated, and
+ ///
+ /// Warning: `hash_builder` is normally randomly generated, and
/// is designed to allow HashMaps to be resistant to attacks that
/// cause many collisions and very poor performance. Setting it
/// manually using this function can expose a DoS attack vector.
}
}
- /// Returns a reference to the map's hasher.
+ /// Returns a reference to the map's [`BuildHasher`].
+ ///
+ /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html
#[stable(feature = "hashmap_public_hasher", since = "1.9.0")]
pub fn hasher(&self) -> &S {
&self.hash_builder
}
/// An iterator visiting all keys in arbitrary order.
- /// Iterator element type is `&'a K`.
+ /// The iterator element type is `&'a K`.
///
/// # Examples
///
}
/// An iterator visiting all values in arbitrary order.
- /// Iterator element type is `&'a V`.
+ /// The iterator element type is `&'a V`.
///
/// # Examples
///
}
/// An iterator visiting all values mutably in arbitrary order.
- /// Iterator element type is `&'a mut V`.
+ /// The iterator element type is `&'a mut V`.
///
/// # Examples
///
}
/// An iterator visiting all key-value pairs in arbitrary order.
- /// Iterator element type is `(&'a K, &'a V)`.
+ /// The iterator element type is `(&'a K, &'a V)`.
///
/// # Examples
///
/// An iterator visiting all key-value pairs in arbitrary order,
/// with mutable references to the values.
- /// Iterator element type is `(&'a K, &'a mut V)`.
+ /// The iterator element type is `(&'a K, &'a mut V)`.
///
/// # Examples
///
}
}
-/// HashMap iterator.
+/// An iterator over the entries of a `HashMap`.
+///
+/// This `struct` is created by the [`iter`] method on [`HashMap`]. See its
+/// documentation for more.
+///
+/// [`iter`]: struct.HashMap.html#method.iter
+/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, K: 'a, V: 'a> {
inner: table::Iter<'a, K, V>,
}
}
-/// HashMap mutable values iterator.
+/// A mutable iterator over the entries of a `HashMap`.
+///
+/// This `struct` is created by the [`iter_mut`] method on [`HashMap`]. See its
+/// documentation for more.
+///
+/// [`iter_mut`]: struct.HashMap.html#method.iter_mut
+/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, K: 'a, V: 'a> {
inner: table::IterMut<'a, K, V>,
}
-/// HashMap move iterator.
+/// An owning iterator over the entries of a `HashMap`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`HashMap`]
+/// (provided by the `IntoIterator` trait). See its documentation for more.
+///
+/// [`into_iter`]: struct.HashMap.html#method.into_iter
+/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<K, V> {
pub(super) inner: table::IntoIter<K, V>,
}
-/// HashMap keys iterator.
+/// An iterator over the keys of a `HashMap`.
+///
+/// This `struct` is created by the [`keys`] method on [`HashMap`]. See its
+/// documentation for more.
+///
+/// [`keys`]: struct.HashMap.html#method.keys
+/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Keys<'a, K: 'a, V: 'a> {
inner: Iter<'a, K, V>,
}
}
-/// HashMap values iterator.
+/// An iterator over the values of a `HashMap`.
+///
+/// This `struct` is created by the [`values`] method on [`HashMap`]. See its
+/// documentation for more.
+///
+/// [`values`]: struct.HashMap.html#method.values
+/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Values<'a, K: 'a, V: 'a> {
inner: Iter<'a, K, V>,
}
}
-/// HashMap drain iterator.
+/// A draining iterator over the entries of a `HashMap`.
+///
+/// This `struct` is created by the [`drain`] method on [`HashMap`]. See its
+/// documentation for more.
+///
+/// [`drain`]: struct.HashMap.html#method.drain
+/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<'a, K: 'a, V: 'a> {
pub(super) inner: table::Drain<'a, K, V>,
}
-/// Mutable HashMap values iterator.
+/// A mutable iterator over the values of a `HashMap`.
+///
+/// This `struct` is created by the [`values_mut`] method on [`HashMap`]. See its
+/// documentation for more.
+///
+/// [`values_mut`]: struct.HashMap.html#method.values_mut
+/// [`HashMap`]: struct.HashMap.html
#[stable(feature = "map_values_mut", since = "1.10.0")]
pub struct ValuesMut<'a, K: 'a, V: 'a> {
inner: IterMut<'a, K, V>,
}
}
-/// A view into a single location in a map, which may be vacant or occupied.
-/// This enum is constructed from the [`entry`] method on [`HashMap`].
+/// A view into a single entry in a map, which may either be vacant or occupied.
+///
+/// This `enum` is constructed from the [`entry`] method on [`HashMap`].
///
/// [`HashMap`]: struct.HashMap.html
/// [`entry`]: struct.HashMap.html#method.entry
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Entry<'a, K: 'a, V: 'a> {
- /// An occupied Entry.
+ /// An occupied entry.
#[stable(feature = "rust1", since = "1.0.0")]
Occupied(#[stable(feature = "rust1", since = "1.0.0")]
OccupiedEntry<'a, K, V>),
- /// A vacant Entry.
+ /// A vacant entry.
#[stable(feature = "rust1", since = "1.0.0")]
Vacant(#[stable(feature = "rust1", since = "1.0.0")]
VacantEntry<'a, K, V>),
}
}
-/// A view into a single occupied location in a HashMap.
+/// A view into an occupied entry in a `HashMap`.
/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
}
}
-/// A view into a single empty location in a HashMap.
+/// A view into a vacant entry in a `HashMap`.
/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
impl Default for DefaultHasher {
- /// Creates a new `DefaultHasher` using [`DefaultHasher::new`]. See
- /// [`DefaultHasher::new`] documentation for more information.
+ /// Creates a new `DefaultHasher` using [`new`]. See its documentation for more.
///
- /// [`DefaultHasher::new`]: #method.new
+ /// [`new`]: #method.new
fn default() -> DefaultHasher {
DefaultHasher::new()
}
// for `bucket.val` in the case of HashSet. I suppose we would need HKT
// to get rid of it properly.
-/// An implementation of a hash set using the underlying representation of a
-/// HashMap where the value is ().
+/// A hash set implemented as a `HashMap` where the value is `()`.
///
-/// As with the `HashMap` type, a `HashSet` requires that the elements
-/// implement the `Eq` and `Hash` traits. This can frequently be achieved by
+/// As with the [`HashMap`] type, a `HashSet` requires that the elements
+/// implement the [`Eq`] and [`Hash`] traits. This can frequently be achieved by
/// using `#[derive(PartialEq, Eq, Hash)]`. If you implement these yourself,
/// it is important that the following property holds:
///
///
///
/// It is a logic error for an item to be modified in such a way that the
-/// item's hash, as determined by the `Hash` trait, or its equality, as
-/// determined by the `Eq` trait, changes while it is in the set. This is
-/// normally only possible through `Cell`, `RefCell`, global state, I/O, or
+/// item's hash, as determined by the [`Hash`] trait, or its equality, as
+/// determined by the [`Eq`] trait, changes while it is in the set. This is
+/// normally only possible through [`Cell`], [`RefCell`], global state, I/O, or
/// unsafe code.
///
/// # Examples
/// ```
///
/// The easiest way to use `HashSet` with a custom type is to derive
-/// `Eq` and `Hash`. We must also derive `PartialEq`, this will in the
-/// future be implied by `Eq`.
+/// [`Eq`] and [`Hash`]. We must also derive [`PartialEq`], this will in the
+/// future be implied by [`Eq`].
///
/// ```
/// use std::collections::HashSet;
/// }
/// ```
///
-/// HashSet with fixed list of elements can be initialized from an array:
+/// A `HashSet` with fixed list of elements can be initialized from an array:
///
/// ```
/// use std::collections::HashSet;
/// // use the values stored in the set
/// }
/// ```
+///
+/// [`Cell`]: ../../std/cell/struct.Cell.html
+/// [`Eq`]: ../../std/cmp/trait.Eq.html
+/// [`Hash`]: ../../std/hash/trait.Hash.html
+/// [`HashMap`]: struct.HashMap.html
+/// [`PartialEq`]: ../../std/cmp/trait.PartialEq.html
+/// [`RefCell`]: ../../std/cell/struct.RefCell.html
#[derive(Clone)]
HashSet { map: HashMap::with_hasher(hasher) }
}
- /// Creates an empty HashSet with with the specified capacity, using
+    /// Creates an empty `HashSet` with the specified capacity, using
/// `hasher` to hash the keys.
///
/// The hash set will be able to hold at least `capacity` elements without
HashSet { map: HashMap::with_capacity_and_hasher(capacity, hasher) }
}
- /// Returns a reference to the set's hasher.
+ /// Returns a reference to the set's [`BuildHasher`].
+ ///
+ /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html
#[stable(feature = "hashmap_public_hasher", since = "1.9.0")]
pub fn hasher(&self) -> &S {
self.map.hasher()
}
/// An iterator visiting all elements in arbitrary order.
- /// Iterator element type is &'a T.
+ /// The iterator element type is `&'a T`.
///
/// # Examples
///
Iter { iter: self.map.keys() }
}
- /// Visit the values representing the difference,
+ /// Visits the values representing the difference,
/// i.e. the values that are in `self` but not in `other`.
///
/// # Examples
}
}
- /// Visit the values representing the symmetric difference,
+ /// Visits the values representing the symmetric difference,
/// i.e. the values that are in `self` or in `other` but not in both.
///
/// # Examples
SymmetricDifference { iter: self.difference(other).chain(other.difference(self)) }
}
- /// Visit the values representing the intersection,
+ /// Visits the values representing the intersection,
/// i.e. the values that are both in `self` and `other`.
///
/// # Examples
}
}
- /// Visit the values representing the union,
+ /// Visits the values representing the union,
/// i.e. all the values in `self` or `other`, without duplicates.
///
/// # Examples
/// Returns `true` if the set contains a value.
///
/// The value may be any borrowed form of the set's value type, but
- /// `Hash` and `Eq` on the borrowed form *must* match those for
+ /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
/// the value type.
///
/// # Examples
/// assert_eq!(set.contains(&1), true);
/// assert_eq!(set.contains(&4), false);
/// ```
+ ///
+ /// [`Eq`]: ../../std/cmp/trait.Eq.html
+ /// [`Hash`]: ../../std/hash/trait.Hash.html
#[stable(feature = "rust1", since = "1.0.0")]
pub fn contains<Q: ?Sized>(&self, value: &Q) -> bool
where T: Borrow<Q>,
/// Returns a reference to the value in the set, if any, that is equal to the given value.
///
/// The value may be any borrowed form of the set's value type, but
- /// `Hash` and `Eq` on the borrowed form *must* match those for
+ /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
/// the value type.
+ ///
+ /// [`Eq`]: ../../std/cmp/trait.Eq.html
+ /// [`Hash`]: ../../std/hash/trait.Hash.html
#[stable(feature = "set_recovery", since = "1.9.0")]
pub fn get<Q: ?Sized>(&self, value: &Q) -> Option<&T>
where T: Borrow<Q>,
/// present in the set.
///
/// The value may be any borrowed form of the set's value type, but
- /// `Hash` and `Eq` on the borrowed form *must* match those for
+ /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
/// the value type.
///
/// # Examples
/// assert_eq!(set.remove(&2), true);
/// assert_eq!(set.remove(&2), false);
/// ```
+ ///
+ /// [`Eq`]: ../../std/cmp/trait.Eq.html
+ /// [`Hash`]: ../../std/hash/trait.Hash.html
#[stable(feature = "rust1", since = "1.0.0")]
pub fn remove<Q: ?Sized>(&mut self, value: &Q) -> bool
where T: Borrow<Q>,
/// Removes and returns the value in the set, if any, that is equal to the given one.
///
/// The value may be any borrowed form of the set's value type, but
- /// `Hash` and `Eq` on the borrowed form *must* match those for
+ /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
/// the value type.
+ ///
+ /// [`Eq`]: ../../std/cmp/trait.Eq.html
+ /// [`Hash`]: ../../std/hash/trait.Hash.html
#[stable(feature = "set_recovery", since = "1.9.0")]
pub fn take<Q: ?Sized>(&mut self, value: &Q) -> Option<T>
where T: Borrow<Q>,
}
}
-/// HashSet iterator
+/// An iterator over the items of a `HashSet`.
+///
+/// This `struct` is created by the [`iter`] method on [`HashSet`].
+/// See its documentation for more.
+///
+/// [`HashSet`]: struct.HashSet.html
+/// [`iter`]: struct.HashSet.html#method.iter
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, K: 'a> {
iter: Keys<'a, K, ()>,
}
-/// HashSet move iterator
+/// An owning iterator over the items of a `HashSet`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`HashSet`]
+/// (provided by the `IntoIterator` trait). See its documentation for more.
+///
+/// [`HashSet`]: struct.HashSet.html
+/// [`into_iter`]: struct.HashSet.html#method.into_iter
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<K> {
iter: map::IntoIter<K, ()>,
}
-/// HashSet drain iterator
+/// A draining iterator over the items of a `HashSet`.
+///
+/// This `struct` is created by the [`drain`] method on [`HashSet`].
+/// See its documentation for more.
+///
+/// [`HashSet`]: struct.HashSet.html
+/// [`drain`]: struct.HashSet.html#method.drain
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Drain<'a, K: 'a> {
iter: map::Drain<'a, K, ()>,
}
-/// Intersection iterator
+/// A lazy iterator producing elements in the intersection of `HashSet`s.
+///
+/// This `struct` is created by the [`intersection`] method on [`HashSet`].
+/// See its documentation for more.
+///
+/// [`HashSet`]: struct.HashSet.html
+/// [`intersection`]: struct.HashSet.html#method.intersection
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Intersection<'a, T: 'a, S: 'a> {
// iterator of the first set
other: &'a HashSet<T, S>,
}
-/// Difference iterator
+/// A lazy iterator producing elements in the difference of `HashSet`s.
+///
+/// This `struct` is created by the [`difference`] method on [`HashSet`].
+/// See its documentation for more.
+///
+/// [`HashSet`]: struct.HashSet.html
+/// [`difference`]: struct.HashSet.html#method.difference
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Difference<'a, T: 'a, S: 'a> {
// iterator of the first set
other: &'a HashSet<T, S>,
}
-/// Symmetric difference iterator.
+/// A lazy iterator producing elements in the symmetric difference of `HashSet`s.
+///
+/// This `struct` is created by the [`symmetric_difference`] method on
+/// [`HashSet`]. See its documentation for more.
+///
+/// [`HashSet`]: struct.HashSet.html
+/// [`symmetric_difference`]: struct.HashSet.html#method.symmetric_difference
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SymmetricDifference<'a, T: 'a, S: 'a> {
iter: Chain<Difference<'a, T, S>, Difference<'a, T, S>>,
}
-/// Set union iterator.
+/// A lazy iterator producing elements in the union of `HashSet`s.
+///
+/// This `struct` is created by the [`union`] method on [`HashSet`].
+/// See its documentation for more.
+///
+/// [`HashSet`]: struct.HashSet.html
+/// [`union`]: struct.HashSet.html#method.union
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Union<'a, T: 'a, S: 'a> {
iter: Chain<Iter<'a, T>, Difference<'a, T, S>>,
//! information to do this itself. Therefore, it is up to us programmers to give
//! it hints.
//!
-//! Any `with_capacity()` constructor will instruct the collection to allocate
+//! Any `with_capacity` constructor will instruct the collection to allocate
//! enough space for the specified number of elements. Ideally this will be for
//! exactly that many elements, but some implementation details may prevent
//! this. [`Vec`] and [`VecDeque`] can be relied on to allocate exactly the
-//! requested amount, though. Use `with_capacity()` when you know exactly how many
+//! requested amount, though. Use `with_capacity` when you know exactly how many
//! elements will be inserted, or at least have a reasonable upper-bound on that
//! number.
//!
-//! When anticipating a large influx of elements, the `reserve()` family of
+//! When anticipating a large influx of elements, the `reserve` family of
//! methods can be used to hint to the collection how much room it should make
-//! for the coming items. As with `with_capacity()`, the precise behavior of
+//! for the coming items. As with `with_capacity`, the precise behavior of
//! these methods will be specific to the collection of interest.
//!
//! For optimal performance, collections will generally avoid shrinking
//! themselves. If you believe that a collection will not soon contain any more
-//! elements, or just really need the memory, the `shrink_to_fit()` method prompts
+//! elements, or just really need the memory, the `shrink_to_fit` method prompts
//! the collection to shrink the backing array to the minimum size capable of
//! holding its elements.
//!
//! Finally, if ever you're interested in what the actual capacity of the
-//! collection is, most collections provide a `capacity()` method to query this
+//! collection is, most collections provide a `capacity` method to query this
//! information on demand. This can be useful for debugging purposes, or for
-//! use with the `reserve()` methods.
+//! use with the `reserve` methods.
//!
//! ## Iterators
//!
//!
//! All of the standard collections provide several iterators for performing
//! bulk manipulation of their contents. The three primary iterators almost
-//! every collection should provide are `iter()`, `iter_mut()`, and `into_iter()`.
+//! every collection should provide are `iter`, `iter_mut`, and `into_iter`.
//! Some of these are not provided on collections where it would be unsound or
//! unreasonable to provide them.
//!
-//! `iter()` provides an iterator of immutable references to all the contents of a
+//! `iter` provides an iterator of immutable references to all the contents of a
//! collection in the most "natural" order. For sequence collections like [`Vec`],
//! this means the items will be yielded in increasing order of index starting
//! at 0. For ordered collections like [`BTreeMap`], this means that the items
//! }
//! ```
//!
-//! `iter_mut()` provides an iterator of *mutable* references in the same order as
-//! `iter()`. This is great for mutating all the contents of the collection.
+//! `iter_mut` provides an iterator of *mutable* references in the same order as
+//! `iter`. This is great for mutating all the contents of the collection.
//!
//! ```
//! let mut vec = vec![1, 2, 3, 4];
//! }
//! ```
//!
-//! `into_iter()` transforms the actual collection into an iterator over its
+//! `into_iter` transforms the actual collection into an iterator over its
//! contents by-value. This is great when the collection itself is no longer
-//! needed, and the values are needed elsewhere. Using `extend()` with `into_iter()`
+//! needed, and the values are needed elsewhere. Using `extend` with `into_iter`
//! is the main way that contents of one collection are moved into another.
-//! `extend()` automatically calls `into_iter()`, and takes any `T: `[`IntoIterator`].
-//! Calling `collect()` on an iterator itself is also a great way to convert one
+//! `extend` automatically calls `into_iter`, and takes any `T: `[`IntoIterator`].
+//! Calling `collect` on an iterator itself is also a great way to convert one
//! collection into another. Both of these methods should internally use the
//! capacity management tools discussed in the previous section to do this as
//! efficiently as possible.
//! ```
//!
//! Iterators also provide a series of *adapter* methods for performing common
-//! threads to sequences. Among the adapters are functional favorites like `map()`,
-//! `fold()`, `skip()` and `take()`. Of particular interest to collections is the
-//! `rev()` adapter, that reverses any iterator that supports this operation. Most
+//! threads to sequences. Among the adapters are functional favorites like `map`,
+//! `fold`, `skip` and `take`. Of particular interest to collections is the
+//! `rev` adapter, that reverses any iterator that supports this operation. Most
//! collections provide reversible iterators as the way to iterate over them in
//! reverse order.
//!
//!
//! Several other collection methods also return iterators to yield a sequence
//! of results but avoid allocating an entire collection to store the result in.
-//! This provides maximum flexibility as `collect()` or `extend()` can be called to
+//! This provides maximum flexibility as `collect` or `extend` can be called to
//! "pipe" the sequence into any collection if desired. Otherwise, the sequence
//! can be looped over with a `for` loop. The iterator can also be discarded
//! after partial use, preventing the computation of the unused items.
//!
//! ## Entries
//!
-//! The `entry()` API is intended to provide an efficient mechanism for
+//! The `entry` API is intended to provide an efficient mechanism for
//! manipulating the contents of a map conditionally on the presence of a key or
//! not. The primary motivating use case for this is to provide efficient
//! accumulator maps. For instance, if one wishes to maintain a count of the
//! number of times each key has been seen, they will have to perform some
//! conditional logic on whether this is the first time the key has been seen or
-//! not. Normally, this would require a `find()` followed by an `insert()`,
+//! not. Normally, this would require a `find` followed by an `insert`,
//! effectively duplicating the search effort on each insertion.
//!
//! When a user calls `map.entry(&key)`, the map will search for the key and
//! then yield a variant of the `Entry` enum.
//!
//! If a `Vacant(entry)` is yielded, then the key *was not* found. In this case
-//! the only valid operation is to `insert()` a value into the entry. When this is
+//! the only valid operation is to `insert` a value into the entry. When this is
//! done, the vacant entry is consumed and converted into a mutable reference to
//! the value that was inserted. This allows for further manipulation of the
//! value beyond the lifetime of the search itself. This is useful if complex
//! just inserted.
//!
//! If an `Occupied(entry)` is yielded, then the key *was* found. In this case,
-//! the user has several options: they can `get()`, `insert()` or `remove()` the
+//! the user has several options: they can `get`, `insert` or `remove` the
//! value of the occupied entry. Additionally, they can convert the occupied
//! entry into a mutable reference to its value, providing symmetry to the
-//! vacant `insert()` case.
+//! vacant `insert` case.
//!
//! ### Examples
//!
-//! Here are the two primary ways in which `entry()` is used. First, a simple
+//! Here are the two primary ways in which `entry` is used. First, a simple
//! example where the logic performed on the values is trivial.
//!
//! #### Counting the number of times each character in a string occurs
//! ```
//!
//! When the logic to be performed on the value is more complex, we may simply
-//! use the `entry()` API to ensure that the value is initialized and perform the
+//! use the `entry` API to ensure that the value is initialized and perform the
//! logic afterwards.
//!
//! #### Tracking the inebriation of customers at a bar
//!
//! # Insert and complex keys
//!
-//! If we have a more complex key, calls to `insert()` will
+//! If we have a more complex key, calls to `insert` will
//! not update the value of the key. For example:
//!
//! ```
#[stable(feature = "rust1", since = "1.0.0")]
pub mod hash_map {
- //! A hash map implementation which uses linear probing with Robin
- //! Hood bucket stealing.
+ //! A hash map implemented with linear probing and Robin Hood bucket stealing.
#[stable(feature = "rust1", since = "1.0.0")]
pub use super::hash::map::*;
}
#[stable(feature = "rust1", since = "1.0.0")]
pub mod hash_set {
- //! An implementation of a hash set using the underlying representation of a
- //! HashMap where the value is ().
+ //! A hash set implemented as a `HashMap` where the value is `()`.
#[stable(feature = "rust1", since = "1.0.0")]
pub use super::hash::set::*;
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! The 32-bit floating point type.
+//! This module provides constants which are specific to the implementation
+//! of the `f32` floating point data type. Mathematically significant
+//! numbers are provided in the `consts` sub-module.
//!
//! *[See also the `f32` primitive type](../primitive.f32.html).*
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! The 64-bit floating point type.
+//! This module provides constants which are specific to the implementation
+//! of the `f64` floating point data type. Mathematically significant
+//! numbers are provided in the `consts` sub-module.
//!
//! *[See also the `f64` primitive type](../primitive.f64.html).*
//! compiler - but they are documented here the same). Like the prelude, the
//! standard macros are imported by default into all crates.
//!
+//! # Contributing changes to the documentation
+//!
+//! Check out the Rust contribution guidelines [here](
+//! https://github.com/rust-lang/rust/blob/master/CONTRIBUTING.md).
+//! The source for this documentation can be found on [GitHub](https://github.com/rust-lang).
+//! To contribute changes, make sure you read the guidelines first, then submit
+//! pull-requests for your suggested changes.
+//!
+//! Contributions are appreciated! If you see a part of the docs that can be
+//! improved, submit a PR, or chat with us first on irc.mozilla.org #rust-docs.
+//!
//! # A Tour of The Rust Standard Library
//!
//! The rest of this crate documentation is dedicated to pointing out notable
/// Arrays of sizes from 0 to 32 (inclusive) implement the following traits if
/// the element type allows it:
///
-/// - [`Clone`][clone] (only if `T: [Copy][copy]`)
+/// - [`Clone`][clone] (only if `T: `[`Copy`][copy])
/// - [`Debug`][debug]
/// - [`IntoIterator`][intoiterator] (implemented for `&[T; N]` and `&mut [T; N]`)
/// - [`PartialEq`][partialeq], [`PartialOrd`][partialord], [`Eq`][eq], [`Ord`][ord]
///
/// This documentation describes a number of methods and trait implementations
/// on the `str` type. For technical reasons, there is additional, separate
-/// documentation in [the `std::str` module](str/index.html) as well.
+/// documentation in the [`std::str`](str/index.html) module as well.
///
/// # Examples
///
/// # Representation
///
/// A `&str` is made up of two components: a pointer to some bytes, and a
-/// length. You can look at these with the [`.as_ptr`] and [`len`] methods:
+/// length. You can look at these with the [`as_ptr`] and [`len`] methods:
///
/// ```
/// use std::slice;
/// assert_eq!(s, Ok(story));
/// ```
///
-/// [`.as_ptr`]: #method.as_ptr
+/// [`as_ptr`]: #method.as_ptr
/// [`len`]: #method.len
///
/// Note: This example shows the internals of `&str`. `unsafe` should not be
-/// used to get a string slice under normal circumstances. Use `.as_slice()`
+/// used to get a string slice under normal circumstances. Use `as_slice`
/// instead.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_str { }
// ERR_NO_RESOURCES: The system was not able to allocate some resource
// needed for the operation.
-#[allow(unused)] pub const ERR_NO_RESOURCES: mx_status_t = -5;
+#[allow(unused)] pub const ERR_NO_RESOURCES: mx_status_t = -3;
// ERR_NO_MEMORY: The system was not able to allocate memory needed
// for the operation.
// ERR_CALL_FAILED: The second phase of mx_channel_call() did not complete
// successfully.
-#[allow(unused)] pub const ERR_CALL_FAILED: mx_status_t = -53;
+#[allow(unused)] pub const ERR_CALL_FAILED: mx_status_t = -5;
+
+// ERR_INTERRUPTED_RETRY: The system call was interrupted, but should be
+// retried. This should not be seen outside of the VDSO.
+#[allow(unused)] pub const ERR_INTERRUPTED_RETRY: mx_status_t = -6;
// ======= Parameter errors =======
// ERR_INVALID_ARGS: an argument is invalid, ex. null pointer
#[allow(unused)] pub const ERR_INVALID_ARGS: mx_status_t = -10;
+// ERR_BAD_HANDLE: A specified handle value does not refer to a handle.
+#[allow(unused)] pub const ERR_BAD_HANDLE: mx_status_t = -11;
+
// ERR_WRONG_TYPE: The subject of the operation is the wrong type to
// perform the operation.
// Example: Attempting a message_read on a thread handle.
-#[allow(unused)] pub const ERR_WRONG_TYPE: mx_status_t = -54;
+#[allow(unused)] pub const ERR_WRONG_TYPE: mx_status_t = -12;
// ERR_BAD_SYSCALL: The specified syscall number is invalid.
-#[allow(unused)] pub const ERR_BAD_SYSCALL: mx_status_t = -11;
-
-// ERR_BAD_HANDLE: A specified handle value does not refer to a handle.
-#[allow(unused)] pub const ERR_BAD_HANDLE: mx_status_t = -12;
+#[allow(unused)] pub const ERR_BAD_SYSCALL: mx_status_t = -13;
// ERR_OUT_OF_RANGE: An argument is outside the valid range for this
// operation.
-#[allow(unused)] pub const ERR_OUT_OF_RANGE: mx_status_t = -13;
+#[allow(unused)] pub const ERR_OUT_OF_RANGE: mx_status_t = -14;
// ERR_BUFFER_TOO_SMALL: A caller provided buffer is too small for
// this operation.
-#[allow(unused)] pub const ERR_BUFFER_TOO_SMALL: mx_status_t = -14;
+#[allow(unused)] pub const ERR_BUFFER_TOO_SMALL: mx_status_t = -15;
// ======= Precondition or state errors =======
// ERR_BAD_STATE: operation failed because the current state of the
// not satisfied
#[allow(unused)] pub const ERR_BAD_STATE: mx_status_t = -20;
+// ERR_TIMED_OUT: The time limit for the operation elapsed before
+// the operation completed.
+#[allow(unused)] pub const ERR_TIMED_OUT: mx_status_t = -21;
+
+// ERR_SHOULD_WAIT: The operation cannot be performed currently but
+// potentially could succeed if the caller waits for a prerequisite
+// to be satisfied, for example waiting for a handle to be readable
+// or writable.
+// Example: Attempting to read from a message pipe that has no
+// messages waiting but has an open remote will return ERR_SHOULD_WAIT.
+// Attempting to read from a message pipe that has no messages waiting
+// and has a closed remote end will return ERR_REMOTE_CLOSED.
+#[allow(unused)] pub const ERR_SHOULD_WAIT: mx_status_t = -22;
+
+// ERR_CANCELED: The in-progress operation (e.g. a wait) has been
+// canceled.
+#[allow(unused)] pub const ERR_CANCELED: mx_status_t = -23;
+
+// ERR_PEER_CLOSED: The operation failed because the remote end
+// of the subject of the operation was closed.
+#[allow(unused)] pub const ERR_PEER_CLOSED: mx_status_t = -24;
+
// ERR_NOT_FOUND: The requested entity is not found.
-#[allow(unused)] pub const ERR_NOT_FOUND: mx_status_t = -3;
+#[allow(unused)] pub const ERR_NOT_FOUND: mx_status_t = -25;
// ERR_ALREADY_EXISTS: An object with the specified identifier
// already exists.
// Example: Attempting to create a file when a file already exists
// with that name.
-#[allow(unused)] pub const ERR_ALREADY_EXISTS: mx_status_t = -15;
+#[allow(unused)] pub const ERR_ALREADY_EXISTS: mx_status_t = -26;
// ERR_ALREADY_BOUND: The operation failed because the named entity
// is already owned or controlled by another entity. The operation
// could succeed later if the current owner releases the entity.
-#[allow(unused)] pub const ERR_ALREADY_BOUND: mx_status_t = -16;
-
-// ERR_TIMED_OUT: The time limit for the operation elapsed before
-// the operation completed.
-#[allow(unused)] pub const ERR_TIMED_OUT: mx_status_t = -23;
-
-// ERR_HANDLE_CLOSED: a handle being waited on was closed
-#[allow(unused)] pub const ERR_HANDLE_CLOSED: mx_status_t = -24;
-
-// ERR_REMOTE_CLOSED: The operation failed because the remote end
-// of the subject of the operation was closed.
-#[allow(unused)] pub const ERR_REMOTE_CLOSED: mx_status_t = -25;
+#[allow(unused)] pub const ERR_ALREADY_BOUND: mx_status_t = -27;
// ERR_UNAVAILABLE: The subject of the operation is currently unable
// to perform the operation.
// Note: This is used when there's no direct way for the caller to
// observe when the subject will be able to perform the operation
// and should thus retry.
-#[allow(unused)] pub const ERR_UNAVAILABLE: mx_status_t = -26;
-
-// ERR_SHOULD_WAIT: The operation cannot be performed currently but
-// potentially could succeed if the caller waits for a prerequisite
-// to be satisfied, for example waiting for a handle to be readable
-// or writable.
-// Example: Attempting to read from a message pipe that has no
-// messages waiting but has an open remote will return ERR_SHOULD_WAIT.
-// Attempting to read from a message pipe that has no messages waiting
-// and has a closed remote end will return ERR_REMOTE_CLOSED.
-#[allow(unused)] pub const ERR_SHOULD_WAIT: mx_status_t = -27;
+#[allow(unused)] pub const ERR_UNAVAILABLE: mx_status_t = -28;
// ======= Permission check errors =======
// ERR_ACCESS_DENIED: The caller did not have permission to perform
#[allow(unused)] pub const ERR_BAD_PATH: mx_status_t = -50;
#[allow(unused)] pub const ERR_NOT_DIR: mx_status_t = -51;
#[allow(unused)] pub const ERR_NOT_FILE: mx_status_t = -52;
+// ERR_FILE_BIG: A file exceeds a filesystem-specific size limit.
+#[allow(unused)] pub const ERR_FILE_BIG: mx_status_t = -53;
+// ERR_NO_SPACE: Filesystem or device space is exhausted.
+#[allow(unused)] pub const ERR_NO_SPACE: mx_status_t = -54;
/// A unique identifier for a running thread.
///
/// A `ThreadId` is an opaque object that has a unique value for each thread
-/// that creates one. `ThreadId`s do not correspond to a thread's system-
-/// designated identifier.
+/// that creates one. `ThreadId`s are not guaranteed to correspond to a thread's
+/// system-designated identifier.
///
/// # Examples
///
///
/// use std::thread;
///
-/// let handler = thread::Builder::new()
-/// .spawn(|| {
-/// let thread = thread::current();
-/// let thread_id = thread.id();
-/// })
-/// .unwrap();
+/// let other_thread = thread::spawn(|| {
+/// thread::current().id()
+/// });
///
-/// handler.join().unwrap();
+/// let other_thread_id = other_thread.join().unwrap();
+/// assert!(thread::current().id() != other_thread_id);
/// ```
#[unstable(feature = "thread_id", issue = "21507")]
-#[derive(Eq, PartialEq, Copy, Clone)]
+#[derive(Eq, PartialEq, Clone, Copy, Hash, Debug)]
pub struct ThreadId(u64);
impl ThreadId {
}
}
-#[unstable(feature = "thread_id", issue = "21507")]
-impl fmt::Debug for ThreadId {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.pad("ThreadId { .. }")
- }
-}
-
////////////////////////////////////////////////////////////////////////////////
// Thread
////////////////////////////////////////////////////////////////////////////////
///
/// use std::thread;
///
- /// let handler = thread::Builder::new()
- /// .spawn(|| {
- /// let thread = thread::current();
- /// println!("thread id: {:?}", thread.id());
- /// })
- /// .unwrap();
+ /// let other_thread = thread::spawn(|| {
+ /// thread::current().id()
+ /// });
///
- /// handler.join().unwrap();
+ /// let other_thread_id = other_thread.join().unwrap();
+ /// assert!(thread::current().id() != other_thread_id);
/// ```
#[unstable(feature = "thread_id", issue = "21507")]
pub fn id(&self) -> ThreadId {
pub items: Vec<ForeignItem>,
}
+/// Global inline assembly
+///
+/// aka module-level assembly or file-scoped assembly
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
+pub struct GlobalAsm {
+ pub asm: Symbol,
+ pub ctxt: SyntaxContext,
+}
+
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub struct EnumDef {
pub variants: Vec<Variant>,
///
/// E.g. `extern {}` or `extern "C" {}`
ForeignMod(ForeignMod),
+ /// Module-level inline assembly (from `global_asm!()`)
+ GlobalAsm(P<GlobalAsm>),
/// A type alias (`type` or `pub type`).
///
/// E.g. `type Foo = Bar<u8>;`
ItemKind::Fn(..) => "function",
ItemKind::Mod(..) => "module",
ItemKind::ForeignMod(..) => "foreign module",
+ ItemKind::GlobalAsm(..) => "global asm",
ItemKind::Ty(..) => "type alias",
ItemKind::Enum(..) => "enum",
ItemKind::Struct(..) => "struct",
feature_tests! {
fn enable_quotes = quote,
fn enable_asm = asm,
+ fn enable_global_asm = global_asm,
fn enable_log_syntax = log_syntax,
fn enable_concat_idents = concat_idents,
fn enable_trace_macros = trace_macros,
token::NtPath(panictry!(p.parse_path(PathStyle::Type)))
},
"meta" => token::NtMeta(panictry!(p.parse_meta_item())),
+ "vis" => token::NtVis(panictry!(p.parse_visibility(true))),
// this is not supposed to happen, since it has been checked
// when compiling the macro.
_ => p.span_bug(sp, "invalid fragment specifier")
use ext::tt::macro_parser::{parse, parse_failure_msg};
use ext::tt::quoted;
use ext::tt::transcribe::transcribe;
+use feature_gate::{self, emit_feature_err, Features, GateIssue};
use parse::{Directory, ParseSess};
use parse::parser::Parser;
use parse::token::{self, NtTT};
use symbol::Symbol;
use tokenstream::{TokenStream, TokenTree};
+use std::cell::RefCell;
use std::collections::{HashMap};
use std::collections::hash_map::{Entry};
use std::rc::Rc;
// Holy self-referential!
/// Converts a `macro_rules!` invocation into a syntax extension.
-pub fn compile(sess: &ParseSess, def: &ast::Item) -> SyntaxExtension {
+pub fn compile(sess: &ParseSess, features: &RefCell<Features>, def: &ast::Item) -> SyntaxExtension {
let lhs_nm = ast::Ident::with_empty_ctxt(Symbol::gensym("lhs"));
let rhs_nm = ast::Ident::with_empty_ctxt(Symbol::gensym("rhs"));
if let MatchedNonterminal(ref nt) = **m {
if let NtTT(ref tt) = **nt {
let tt = quoted::parse(tt.clone().into(), true, sess).pop().unwrap();
- valid &= check_lhs_nt_follows(sess, &tt);
+ valid &= check_lhs_nt_follows(sess, features, &tt);
return tt;
}
}
NormalTT(exp, Some(def.span), attr::contains_name(&def.attrs, "allow_internal_unstable"))
}
-fn check_lhs_nt_follows(sess: &ParseSess, lhs: &quoted::TokenTree) -> bool {
+fn check_lhs_nt_follows(sess: &ParseSess,
+ features: &RefCell<Features>,
+ lhs: &quoted::TokenTree) -> bool {
// lhs is going to be like TokenTree::Delimited(...), where the
// entire lhs is those tts. Or, it can be a "bare sequence", not wrapped in parens.
match lhs {
- &quoted::TokenTree::Delimited(_, ref tts) => check_matcher(sess, &tts.tts),
+ &quoted::TokenTree::Delimited(_, ref tts) => check_matcher(sess, features, &tts.tts),
_ => {
let msg = "invalid macro matcher; matchers must be contained in balanced delimiters";
sess.span_diagnostic.span_err(lhs.span(), msg);
false
}
-fn check_matcher(sess: &ParseSess, matcher: &[quoted::TokenTree]) -> bool {
+fn check_matcher(sess: &ParseSess,
+ features: &RefCell<Features>,
+ matcher: &[quoted::TokenTree]) -> bool {
let first_sets = FirstSets::new(matcher);
let empty_suffix = TokenSet::empty();
let err = sess.span_diagnostic.err_count();
- check_matcher_core(sess, &first_sets, matcher, &empty_suffix);
+ check_matcher_core(sess, features, &first_sets, matcher, &empty_suffix);
err == sess.span_diagnostic.err_count()
}
// Requires that `first_sets` is pre-computed for `matcher`;
// see `FirstSets::new`.
fn check_matcher_core(sess: &ParseSess,
+ features: &RefCell<Features>,
first_sets: &FirstSets,
matcher: &[quoted::TokenTree],
follow: &TokenSet) -> TokenSet {
match *token {
TokenTree::Token(..) | TokenTree::MetaVarDecl(..) => {
let can_be_followed_by_any;
- if let Err(bad_frag) = has_legal_fragment_specifier(token) {
+ if let Err(bad_frag) = has_legal_fragment_specifier(sess, features, token) {
let msg = format!("invalid fragment specifier `{}`", bad_frag);
sess.span_diagnostic.struct_span_err(token.span(), &msg)
- .help("valid fragment specifiers are `ident`, `block`, \
- `stmt`, `expr`, `pat`, `ty`, `path`, `meta`, `tt` \
- and `item`")
+ .help("valid fragment specifiers are `ident`, `block`, `stmt`, `expr`, \
+ `pat`, `ty`, `path`, `meta`, `tt`, `item` and `vis`")
.emit();
// (This eliminates false positives and duplicates
// from error messages.)
}
TokenTree::Delimited(span, ref d) => {
let my_suffix = TokenSet::singleton(d.close_tt(span));
- check_matcher_core(sess, first_sets, &d.tts, &my_suffix);
+ check_matcher_core(sess, features, first_sets, &d.tts, &my_suffix);
// don't track non NT tokens
last.replace_with_irrelevant();
// At this point, `suffix_first` is built, and
// `my_suffix` is some TokenSet that we can use
// for checking the interior of `seq_rep`.
- let next = check_matcher_core(sess, first_sets, &seq_rep.tts, my_suffix);
+ let next = check_matcher_core(sess, features, first_sets, &seq_rep.tts, my_suffix);
if next.maybe_empty {
last.add_all(&next);
} else {
// harmless
Ok(true)
},
+ "vis" => {
+ // Explicitly disallow `priv`, on the off chance it comes back.
+ match *tok {
+ TokenTree::Token(_, ref tok) => match *tok {
+ Comma => Ok(true),
+ Ident(i) if i.name != "priv" => Ok(true),
+ ref tok => Ok(tok.can_begin_type())
+ },
+ TokenTree::MetaVarDecl(_, _, frag) if frag.name == "ident"
+ || frag.name == "ty"
+ || frag.name == "path" => Ok(true),
+ _ => Ok(false)
+ }
+ },
"" => Ok(true), // keywords::Invalid
_ => Err((format!("invalid fragment specifier `{}`", frag),
"valid fragment specifiers are `ident`, `block`, \
- `stmt`, `expr`, `pat`, `ty`, `path`, `meta`, `tt` \
- and `item`"))
+ `stmt`, `expr`, `pat`, `ty`, `path`, `meta`, `tt`, \
+ `item` and `vis`"))
}
}
}
-fn has_legal_fragment_specifier(tok: &quoted::TokenTree) -> Result<(), String> {
+fn has_legal_fragment_specifier(sess: &ParseSess,
+ features: &RefCell<Features>,
+ tok: &quoted::TokenTree) -> Result<(), String> {
debug!("has_legal_fragment_specifier({:?})", tok);
- if let quoted::TokenTree::MetaVarDecl(_, _, frag_spec) = *tok {
- let s = &frag_spec.name.as_str();
- if !is_legal_fragment_specifier(s) {
- return Err(s.to_string());
+ if let quoted::TokenTree::MetaVarDecl(_, _, ref frag_spec) = *tok {
+ let frag_name = frag_spec.name.as_str();
+ let frag_span = tok.span();
+ if !is_legal_fragment_specifier(sess, features, &frag_name, frag_span) {
+ return Err(frag_name.to_string());
}
}
Ok(())
}
-fn is_legal_fragment_specifier(frag: &str) -> bool {
- match frag {
+fn is_legal_fragment_specifier(sess: &ParseSess,
+ features: &RefCell<Features>,
+ frag_name: &str,
+ frag_span: Span) -> bool {
+ match frag_name {
"item" | "block" | "stmt" | "expr" | "pat" |
"path" | "ty" | "ident" | "meta" | "tt" | "" => true,
+ "vis" => {
+ if !features.borrow().macro_vis_matcher {
+ let explain = feature_gate::EXPLAIN_VIS_MATCHER;
+ emit_feature_err(sess,
+ "macro_vis_matcher",
+ frag_span,
+ GateIssue::Language,
+ explain);
+ }
+ true
+ },
_ => false,
}
}
// Hack to document `-Z linker-flavor` in The Unstable Book
(active, linker_flavor, "1.18.0", Some(41142)),
+
+ // Allows module-level inline assembly by way of global_asm!()
+ (active, global_asm, "1.18.0", Some(35119)),
+
+ // Allows overlapping impls of marker traits
+ (active, overlapping_marker_traits, "1.18.0", Some(29864)),
+
+ // Allows use of the :vis macro fragment specifier
+ (active, macro_vis_matcher, "1.18.0", Some(41022)),
);
declare_features! (
pub const EXPLAIN_ASM: &'static str =
"inline assembly is not stable enough for use and is subject to change";
+pub const EXPLAIN_GLOBAL_ASM: &'static str =
+ "`global_asm!` is not stable enough for use and is subject to change";
+
pub const EXPLAIN_LOG_SYNTAX: &'static str =
"`log_syntax!` is not stable enough for use and is subject to change";
pub const EXPLAIN_DERIVE_UNDERSCORE: &'static str =
"attributes of the form `#[derive_*]` are reserved for the compiler";
+pub const EXPLAIN_VIS_MATCHER: &'static str =
+ ":vis fragment specifier is experimental and subject to change";
+
pub const EXPLAIN_PLACEMENT_IN: &'static str =
"placement-in expression syntax is experimental and subject to change.";
noop_fold_foreign_mod(nm, self)
}
+ fn fold_global_asm(&mut self, ga: P<GlobalAsm>) -> P<GlobalAsm> {
+ noop_fold_global_asm(ga, self)
+ }
+
fn fold_variant(&mut self, v: Variant) -> Variant {
noop_fold_variant(v, self)
}
}
}
+pub fn noop_fold_global_asm<T: Folder>(ga: P<GlobalAsm>,
+ _: &mut T) -> P<GlobalAsm> {
+ ga
+}
+
pub fn noop_fold_variant<T: Folder>(v: Variant, fld: &mut T) -> Variant {
Spanned {
node: Variant_ {
token::NtWhereClause(where_clause) =>
token::NtWhereClause(fld.fold_where_clause(where_clause)),
token::NtArg(arg) => token::NtArg(fld.fold_arg(arg)),
+ token::NtVis(vis) => token::NtVis(fld.fold_vis(vis)),
}
}
}
ItemKind::Mod(m) => ItemKind::Mod(folder.fold_mod(m)),
ItemKind::ForeignMod(nm) => ItemKind::ForeignMod(folder.fold_foreign_mod(nm)),
+ ItemKind::GlobalAsm(ga) => ItemKind::GlobalAsm(folder.fold_global_asm(ga)),
ItemKind::Ty(t, generics) => {
ItemKind::Ty(folder.fold_ty(t), folder.fold_generics(generics))
}
}
token::Literal(token::Float(n), _suf) => {
self.bump();
- let prev_span = self.prev_span;
let fstr = n.as_str();
- let mut err = self.diagnostic().struct_span_err(prev_span,
+ let mut err = self.diagnostic().struct_span_err(self.prev_span,
&format!("unexpected token: `{}`", n));
+ err.span_label(self.prev_span, &"unexpected token");
if fstr.chars().all(|x| "0123456789.".contains(x)) {
let float = match fstr.parse::<f64>().ok() {
Some(f) => f,
word(&mut s.s, fstr.splitn(2, ".").last().unwrap())
});
err.span_suggestion(
- prev_span,
+ lo.to(self.prev_span),
"try parenthesizing the first index",
sugg);
}
}).emit();
}
- // Parse bounds of a type parameter `BOUND + BOUND + BOUND` without trailing `+`.
+ // Parse bounds of a type parameter `BOUND + BOUND + BOUND`, possibly with trailing `+`.
// BOUND = TY_BOUND | LT_BOUND
// LT_BOUND = LIFETIME (e.g. `'a`)
// TY_BOUND = [?] [for<LT_PARAM_DEFS>] SIMPLE_PATH (e.g. `?for<'a: 'b> m::Trait<'a>`)
self.parse_ty_param_bounds_common(true)
}
- // Parse bounds of a type parameter `BOUND + BOUND + BOUND` without trailing `+`.
+ // Parse bounds of a lifetime parameter `BOUND + BOUND + BOUND`, possibly with trailing `+`.
// BOUND = LT_BOUND (e.g. `'a`)
fn parse_lt_param_bounds(&mut self) -> Vec<Lifetime> {
let mut lifetimes = Vec::new();
/// and `pub(super)` for `pub(in super)`. If the following element can't be a tuple (i.e. it's
/// a function definition, it's not a tuple struct field) and the contents within the parens
/// isn't valid, emit a proper diagnostic.
- fn parse_visibility(&mut self, can_take_tuple: bool) -> PResult<'a, Visibility> {
+ pub fn parse_visibility(&mut self, can_take_tuple: bool) -> PResult<'a, Visibility> {
+ maybe_whole!(self, NtVis, |x| x);
+
if !self.eat_keyword(keywords::Pub) {
return Ok(Visibility::Inherited)
}
/// Stuff inside brackets for attributes
NtMeta(ast::MetaItem),
NtPath(ast::Path),
+ NtVis(ast::Visibility),
NtTT(TokenTree),
// These are not exposed to macros, but are used by quasiquote.
NtArm(ast::Arm),
NtGenerics(..) => f.pad("NtGenerics(..)"),
NtWhereClause(..) => f.pad("NtWhereClause(..)"),
NtArg(..) => f.pad("NtArg(..)"),
+ NtVis(..) => f.pad("NtVis(..)"),
}
}
}
token::NtGenerics(ref e) => generics_to_string(&e),
token::NtWhereClause(ref e) => where_clause_to_string(&e),
token::NtArg(ref e) => arg_to_string(&e),
+ token::NtVis(ref e) => vis_to_string(&e),
}
}
}
to_string(|s| s.print_ident(id))
}
+pub fn vis_to_string(v: &ast::Visibility) -> String {
+ to_string(|s| s.print_visibility(v))
+}
+
pub fn fun_to_string(decl: &ast::FnDecl,
unsafety: ast::Unsafety,
constness: ast::Constness,
}
pub fn visibility_qualified(vis: &ast::Visibility, s: &str) -> String {
- match *vis {
- ast::Visibility::Public => format!("pub {}", s),
- ast::Visibility::Crate(_) => format!("pub(crate) {}", s),
- ast::Visibility::Restricted { ref path, .. } =>
- format!("pub({}) {}", to_string(|s| s.print_path(path, false, 0, true)), s),
- ast::Visibility::Inherited => s.to_string()
- }
+ format!("{}{}", to_string(|s| s.print_visibility(vis)), s)
}
fn needs_parentheses(expr: &ast::Expr) -> bool {
self.print_foreign_mod(nmod, &item.attrs)?;
self.bclose(item.span)?;
}
+ ast::ItemKind::GlobalAsm(ref ga) => {
+ self.head(&visibility_qualified(&item.vis, "global_asm!"))?;
+ word(&mut self.s, &ga.asm.as_str())?;
+ self.end()?;
+ }
ast::ItemKind::Ty(ref ty, ref params) => {
self.ibox(INDENT_UNIT)?;
self.ibox(0)?;
ast::Visibility::Crate(_) => self.word_nbsp("pub(crate)"),
ast::Visibility::Restricted { ref path, .. } => {
let path = to_string(|s| s.print_path(path, false, 0, true));
- self.word_nbsp(&format!("pub({})", path))
+ if path == "self" || path == "super" {
+ self.word_nbsp(&format!("pub({})", path))
+ } else {
+ self.word_nbsp(&format!("pub(in {})", path))
+ }
}
ast::Visibility::Inherited => Ok(())
}
}
fn visit_mod(&mut self, m: &'ast Mod, _s: Span, _n: NodeId) { walk_mod(self, m) }
fn visit_foreign_item(&mut self, i: &'ast ForeignItem) { walk_foreign_item(self, i) }
+ fn visit_global_asm(&mut self, ga: &'ast GlobalAsm) { walk_global_asm(self, ga) }
fn visit_item(&mut self, i: &'ast Item) { walk_item(self, i) }
fn visit_local(&mut self, l: &'ast Local) { walk_local(self, l) }
fn visit_block(&mut self, b: &'ast Block) { walk_block(self, b) }
ItemKind::ForeignMod(ref foreign_module) => {
walk_list!(visitor, visit_foreign_item, &foreign_module.items);
}
+ ItemKind::GlobalAsm(ref ga) => visitor.visit_global_asm(ga),
ItemKind::Ty(ref typ, ref type_parameters) => {
visitor.visit_ty(typ);
visitor.visit_generics(type_parameters)
walk_list!(visitor, visit_attribute, &foreign_item.attrs);
}
+pub fn walk_global_asm<'a, V: Visitor<'a>>(_: &mut V, _: &'a GlobalAsm) {
+ // Empty!
+}
+
pub fn walk_ty_param_bound<'a, V: Visitor<'a>>(visitor: &mut V, bound: &'a TyParamBound) {
match *bound {
TraitTyParamBound(ref typ, ref modifier) => {
--- /dev/null
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/// Module-level assembly support.
+///
+/// The macro defined here allows you to specify "top-level",
+/// "file-scoped", or "module-level" assembly. These synonyms
+/// all correspond to LLVM's module-level inline assembly instruction.
+///
+/// For example, `global_asm!("some assembly here")` translates to
+/// LLVM's `module asm "some assembly here"`. All of LLVM's caveats
+/// therefore apply.
+
+use syntax::ast;
+use syntax::ext::base;
+use syntax::ext::base::*;
+use syntax::feature_gate;
+use syntax::ptr::P;
+use syntax::symbol::Symbol;
+use syntax_pos::Span;
+use syntax::tokenstream;
+
+use syntax::util::small_vector::SmallVector;
+
+pub const MACRO: &'static str = "global_asm";
+
+/// Expands a `global_asm!("...")` invocation into a module-level
+/// `ItemKind::GlobalAsm` item (LLVM module-level inline assembly).
+///
+/// The single argument must be a string literal; on any failure a
+/// `DummyResult` is returned so expansion can continue.
+pub fn expand_global_asm<'cx>(cx: &'cx mut ExtCtxt,
+ sp: Span,
+ tts: &[tokenstream::TokenTree]) -> Box<base::MacResult + 'cx> {
+ // `global_asm!` is feature-gated (feature `global_asm`); emit the
+ // gate error and bail out with a dummy result if it is not enabled.
+ if !cx.ecfg.enable_global_asm() {
+ feature_gate::emit_feature_err(&cx.parse_sess,
+ MACRO,
+ sp,
+ feature_gate::GateIssue::Language,
+ feature_gate::EXPLAIN_GLOBAL_ASM);
+ return DummyResult::any(sp);
+ }
+
+ // Parse the macro's tokens as one expression and require it to
+ // evaluate to a string literal (the assembly text).
+ let mut p = cx.new_parser_from_tts(tts);
+ let (asm, _) = match expr_to_string(cx,
+ panictry!(p.parse_expr()),
+ "inline assembly must be a string literal") {
+ Some((s, st)) => (s, st),
+ None => return DummyResult::any(sp),
+ };
+
+ // Wrap the assembly string in an anonymous (empty-ident, inherited
+ // visibility) item so it flows through the normal item pipeline.
+ MacEager::items(SmallVector::one(P(ast::Item {
+ ident: ast::Ident::with_empty_ctxt(Symbol::intern("")),
+ attrs: Vec::new(),
+ id: ast::DUMMY_NODE_ID,
+ node: ast::ItemKind::GlobalAsm(P(ast::GlobalAsm {
+ asm: asm,
+ ctxt: cx.backtrace(),
+ })),
+ vis: ast::Visibility::Inherited,
+ span: sp,
+ })))
+}
mod env;
mod format;
mod format_foreign;
+mod global_asm;
mod log_syntax;
mod trace_macros;
module_path: expand_mod,
asm: asm::expand_asm,
+ global_asm: global_asm::expand_global_asm,
cfg: cfg::expand_cfg,
concat: concat::expand_syntax_ext,
concat_idents: concat_idents::expand_syntax_ext,
--- /dev/null
+Subproject commit da282f1bb7277b4d30fa1599ee29ad8eb4dd2a92
HasSideEffects, IsAlignStack, fromRust(Dialect)));
}
+extern "C" void LLVMRustAppendModuleInlineAsm(LLVMModuleRef M, const char *Asm) {
+ unwrap(M)->appendModuleInlineAsm(StringRef(Asm));
+}
+
typedef DIBuilder *LLVMRustDIBuilderRef;
typedef struct LLVMOpaqueMetadata *LLVMRustMetadataRef;
--- /dev/null
+.global foo
+foo:
+ jmp baz
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-aarch64
+// ignore-aarch64_be
+// ignore-arm
+// ignore-armeb
+// ignore-avr
+// ignore-bpfel
+// ignore-bpfeb
+// ignore-hexagon
+// ignore-mips
+// ignore-mipsel
+// ignore-mips64
+// ignore-mips64el
+// ignore-msp430
+// ignore-powerpc64
+// ignore-powerpc64le
+// ignore-powerpc
+// ignore-r600
+// ignore-amdgcn
+// ignore-sparc
+// ignore-sparcv9
+// ignore-sparcel
+// ignore-s390x
+// ignore-tce
+// ignore-thumb
+// ignore-thumbeb
+// ignore-xcore
+// ignore-nvptx
+// ignore-nvptx64
+// ignore-le32
+// ignore-le64
+// ignore-amdil
+// ignore-amdil64
+// ignore-hsail
+// ignore-hsail64
+// ignore-spir
+// ignore-spir64
+// ignore-kalimba
+// ignore-shave
+// ignore-wasm32
+// ignore-wasm64
+// ignore-emscripten
+// compile-flags: -C no-prepopulate-passes
+
+#![feature(global_asm)]
+#![crate_type = "lib"]
+
+// CHECK-LABEL: foo
+// CHECK: module asm
+// this regex will capture the correct unconditional branch inst.
+// CHECK: module asm "{{[[:space:]]+}}jmp baz"
+global_asm!(r#"
+ .global foo
+foo:
+ jmp baz
+"#);
+
+extern "C" {
+ fn foo();
+}
+
+// CHECK-LABEL: @baz
+#[no_mangle]
+pub unsafe extern "C" fn baz() {}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-aarch64
+// ignore-aarch64_be
+// ignore-arm
+// ignore-armeb
+// ignore-avr
+// ignore-bpfel
+// ignore-bpfeb
+// ignore-hexagon
+// ignore-mips
+// ignore-mipsel
+// ignore-mips64
+// ignore-mips64el
+// ignore-msp430
+// ignore-powerpc64
+// ignore-powerpc64le
+// ignore-powerpc
+// ignore-r600
+// ignore-amdgcn
+// ignore-sparc
+// ignore-sparcv9
+// ignore-sparcel
+// ignore-s390x
+// ignore-tce
+// ignore-thumb
+// ignore-thumbeb
+// ignore-xcore
+// ignore-nvptx
+// ignore-nvptx64
+// ignore-le32
+// ignore-le64
+// ignore-amdil
+// ignore-amdil64
+// ignore-hsail
+// ignore-hsail64
+// ignore-spir
+// ignore-spir64
+// ignore-kalimba
+// ignore-shave
+// ignore-wasm32
+// ignore-wasm64
+// ignore-emscripten
+// compile-flags: -C no-prepopulate-passes
+
+#![feature(global_asm)]
+#![crate_type = "lib"]
+
+// CHECK-LABEL: foo
+// CHECK: module asm
+// CHECK: module asm "{{[[:space:]]+}}jmp baz"
+global_asm!(include_str!("foo.s"));
+
+extern "C" {
+ fn foo();
+}
+
+// CHECK-LABEL: @baz
+#[no_mangle]
+pub unsafe extern "C" fn baz() {}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-aarch64
+// ignore-aarch64_be
+// ignore-arm
+// ignore-armeb
+// ignore-avr
+// ignore-bpfel
+// ignore-bpfeb
+// ignore-hexagon
+// ignore-mips
+// ignore-mipsel
+// ignore-mips64
+// ignore-mips64el
+// ignore-msp430
+// ignore-powerpc64
+// ignore-powerpc64le
+// ignore-powerpc
+// ignore-r600
+// ignore-amdgcn
+// ignore-sparc
+// ignore-sparcv9
+// ignore-sparcel
+// ignore-s390x
+// ignore-tce
+// ignore-thumb
+// ignore-thumbeb
+// ignore-xcore
+// ignore-nvptx
+// ignore-nvptx64
+// ignore-le32
+// ignore-le64
+// ignore-amdil
+// ignore-amdil64
+// ignore-hsail
+// ignore-hsail64
+// ignore-spir
+// ignore-spir64
+// ignore-kalimba
+// ignore-shave
+// ignore-wasm32
+// ignore-wasm64
+// ignore-emscripten
+// compile-flags: -C no-prepopulate-passes
+
+#![feature(global_asm)]
+#![crate_type = "lib"]
+#![no_std]
+
+// CHECK-LABEL: foo
+// CHECK: module asm
+// CHECK: module asm "{{[[:space:]]+}}jmp baz"
+// any other global_asm will be appended to this first block, so:
+// CHECK-LABEL: bar
+// CHECK: module asm "{{[[:space:]]+}}jmp quux"
+global_asm!(r#"
+ .global foo
+foo:
+ jmp baz
+"#);
+
+extern "C" {
+ fn foo();
+}
+
+// CHECK-LABEL: @baz
+#[no_mangle]
+pub unsafe extern "C" fn baz() {}
+
+// no checks here; this has been appended to the first occurrence
+global_asm!(r#"
+ .global bar
+bar:
+ jmp quux
+"#);
+
+extern "C" {
+ fn bar();
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn quux() {}
#[no_mangle]
#[naked]
fn naked_empty() {
- // CHECK: ret void
+ // CHECK-NEXT: {{.+}}:
+ // CHECK-NEXT: ret void
}
// CHECK: Function Attrs: naked uwtable
#[naked]
// CHECK-NEXT: define internal void @naked_with_args(i{{[0-9]+}})
fn naked_with_args(a: isize) {
- // CHECK: %a = alloca i{{[0-9]+}}
- // CHECK: ret void
+ // CHECK-NEXT: {{.+}}:
+ // CHECK-NEXT: %a = alloca i{{[0-9]+}}
&a; // keep variable in an alloca
+ // CHECK: ret void
}
// CHECK: Function Attrs: naked uwtable
#[no_mangle]
#[naked]
fn naked_with_return() -> isize {
- // CHECK: ret i{{[0-9]+}} 0
+ // CHECK-NEXT: {{.+}}:
+ // CHECK-NEXT: ret i{{[0-9]+}} 0
0
}
#[no_mangle]
#[naked]
fn naked_with_args_and_return(a: isize) -> isize {
- // CHECK: %a = alloca i{{[0-9]+}}
- // CHECK: ret i{{[0-9]+}} %{{[0-9]+}}
+ // CHECK-NEXT: {{.+}}:
+ // CHECK-NEXT: %a = alloca i{{[0-9]+}}
&a; // keep variable in an alloca
+ // CHECK: ret i{{[0-9]+}} %{{[0-9]+}}
a
}
#[no_mangle]
#[naked]
fn naked_recursive() {
- // CHECK: call void @naked_empty()
+ // CHECK-NEXT: {{.+}}:
+ // CHECK-NEXT: call void @naked_empty()
+
+ // FIXME(#39685) Avoid one block per call.
+ // CHECK-NEXT: br label %bb1
+ // CHECK: bb1:
+
naked_empty();
- // CHECK: %{{[0-9]+}} = call i{{[0-9]+}} @naked_with_return()
+
+ // CHECK-NEXT: %{{[0-9]+}} = call i{{[0-9]+}} @naked_with_return()
+
+ // FIXME(#39685) Avoid one block per call.
+ // CHECK-NEXT: br label %bb2
+ // CHECK: bb2:
+
+ // CHECK-NEXT: %{{[0-9]+}} = call i{{[0-9]+}} @naked_with_args_and_return(i{{[0-9]+}} %{{[0-9]+}})
+
+ // FIXME(#39685) Avoid one block per call.
+ // CHECK-NEXT: br label %bb3
+ // CHECK: bb3:
+
+ // CHECK-NEXT: call void @naked_with_args(i{{[0-9]+}} %{{[0-9]+}})
+
+ // FIXME(#39685) Avoid one block per call.
+ // CHECK-NEXT: br label %bb4
+ // CHECK: bb4:
+
naked_with_args(
- // CHECK: %{{[0-9]+}} = call i{{[0-9]+}} @naked_with_args_and_return(i{{[0-9]+}} %{{[0-9]+}})
naked_with_args_and_return(
- // CHECK: call void @naked_with_args(i{{[0-9]+}} %{{[0-9]+}})
naked_with_return()
)
);
+ // CHECK-NEXT: ret void
}
fn main() {
let x = [];
- //~^ ERROR E0102
- //~| NOTE cannot resolve type of variable
+ //~^ ERROR type annotations needed
+ //~| NOTE consider giving `x` a type
+ //~| NOTE cannot infer type for `_`
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-trait MyTrait {}
+trait MyTrait { fn foo() {} }
impl Drop for MyTrait {
//~^ ERROR E0120
// except according to those terms.
pub trait Foo {
+ fn foo() {}
}
impl Foo for isize {
&mut f
+
&f; //~ ERROR: cannot borrow `f` as immutable because it is also borrowed as mutable
- //~^ cannot borrow `f` as immutable because it is also borrowed as mutable
}
fn immut_plus_mut() {
&f
+
&mut f; //~ ERROR: cannot borrow `f` as mutable because it is also borrowed as immutable
- //~^ cannot borrow `f` as mutable because it is also borrowed as immutable
}
fn main() {}
borrow(&*v); //~ ERROR cannot borrow
if cond2 {
x = &mut v; //~ ERROR cannot borrow
- //~^ ERROR cannot borrow
}
}
}
match 1 {
1 => { addr = &mut x; }
//~^ ERROR cannot borrow `x` as mutable more than once at a time
- //~| ERROR cannot borrow `x` as mutable more than once at a time
2 => { addr = &mut x; }
//~^ ERROR cannot borrow `x` as mutable more than once at a time
_ => { addr = &mut x; }
// except according to those terms.
#![feature(optin_builtin_traits)]
+#![feature(overlapping_marker_traits)]
trait MyTrait {}
//~^ ERROR conflicting implementations of trait `std::marker::Send`
unsafe impl<T:'static> Send for TestType<T> {}
-//~^ ERROR conflicting implementations of trait `std::marker::Send`
impl !Send for TestType<i32> {}
+//~^ ERROR conflicting implementations of trait `std::marker::Send`
fn main() {}
#![feature(optin_builtin_traits)]
-trait MyTrait {}
+trait MyTrait { fn foo() {} }
impl MyTrait for .. {}
//~^ ERROR redundant default implementations of trait `MyTrait`
// except according to those terms.
#![feature(optin_builtin_traits)]
+#![feature(overlapping_marker_traits)]
use std::marker::Copy;
unsafe impl Send for &'static [NotSync] {}
//~^ ERROR E0117
-//~| ERROR E0119
fn main() {
}
// Test that you cannot *directly* dispatch on lifetime requirements
-trait MyTrait {}
+trait MyTrait { fn foo() {} }
impl<T> MyTrait for T {}
impl<T: 'static> MyTrait for T {} //~ ERROR E0119
// Seems pretty basic, but then there was issue #24241. :)
trait From<U> {
+ fn foo() {}
}
impl <T> From<T> for T {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-trait Foo {}
+trait Foo { fn foo() {} }
impl<T> Foo for T {}
impl<U> Foo for U {} //~ ERROR conflicting implementations of trait `Foo`:
-trait Bar {}
+trait Bar { fn bar() {} }
impl<T> Bar for (T, u8) {}
impl<T> Bar for (u8, T) {} //~ ERROR conflicting implementations of trait `Bar` for type `(u8, u8)`:
-trait Baz<T> {}
+trait Baz<T> { fn baz() {} }
impl<T> Baz<u8> for T {}
impl<T> Baz<T> for u8 {} //~ ERROR conflicting implementations of trait `Baz<u8>` for type `u8`:
-trait Quux<U, V> {}
+trait Quux<U, V> { fn quux() {} }
impl<T, U, V> Quux<U, V> for T {}
impl<T, U> Quux<U, U> for T {} //~ ERROR conflicting implementations of trait `Quux<_, _>`:
// due to the orphan rules. Therefore, `A::Item` may yet turn out to
// be `i32`.
-pub trait Foo<P> {}
+pub trait Foo<P> { fn foo() {} }
pub trait Bar {
type Output: 'static;
use std::marker::PhantomData;
-pub trait Foo<P> {}
+pub trait Foo<P> { fn foo() {} }
impl <P, T: Foo<P>> Foo<P> for Option<T> {}
use std::marker::PhantomData;
-pub trait Foo<P> {}
+pub trait Foo<P> { fn foo() {} }
pub trait Bar {
type Output: 'static;
struct MyType { x: i32 }
-trait MyTrait { }
+trait MyTrait { fn foo() {} }
impl<T: lib::MyCopy> MyTrait for T { }
// `MyFundamentalStruct` is declared fundamental, so we can test that
struct MyType { x: i32 }
-trait MyTrait { }
+trait MyTrait { fn foo() {} }
impl<T: lib::MyCopy> MyTrait for T { }
// `MyFundamentalStruct` is declared fundamental, so we can test that
struct MyType { x: i32 }
-trait MyTrait { }
+trait MyTrait { fn foo() {} }
impl<T: lib::MyCopy> MyTrait for T { }
struct MyType { x: i32 }
-trait MyTrait { }
+trait MyTrait { fn foo() {} }
impl<T: lib::MyCopy> MyTrait for T { }
// `MyStruct` is not declared fundamental, therefore this would
struct MyType { x: i32 }
-trait MyTrait { }
+trait MyTrait { fn foo() {} }
impl<T: lib::MyCopy> MyTrait for T { }
// Tuples are not fundamental, therefore this would require that
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(default_type_parameter_fallback)]
-
-use std::fmt::Debug;
-
-// Example from the RFC
-fn foo<F:Default=usize>() -> F { F::default() }
-//~^ NOTE: a default was defined here...
-
-fn bar<B:Debug=isize>(b: B) { println!("{:?}", b); }
-//~^ NOTE: a second default was defined here...
-
-fn main() {
- // Here, F is instantiated with $0=uint
- let x = foo();
- //~^ ERROR: mismatched types
- //~| NOTE: conflicting type parameter defaults `usize` and `isize`
- //~| NOTE: conflicting type parameter defaults `usize` and `isize`
- //~| NOTE: ...that was applied to an unconstrained type variable here
-
- // Here, B is instantiated with $1=uint, and constraint $0 <: $1 is added.
- bar(x);
- //~^ NOTE: ...that also applies to the same type variable here
-}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-//
-//aux-build:default_ty_param_cross_crate_crate.rs
-
-#![feature(default_type_parameter_fallback)]
-
-extern crate default_param_test;
-
-use default_param_test::{Foo, bleh};
-
-fn meh<X, B=bool>(x: Foo<X, B>) {}
-//~^ NOTE: a default was defined here...
-
-fn main() {
- let foo = bleh();
- //~^ NOTE: ...that also applies to the same type variable here
-
- meh(foo);
- //~^ ERROR: mismatched types
- //~| NOTE: conflicting type parameter defaults `bool` and `char`
- //~| NOTE: conflicting type parameter defaults `bool` and `char`
- //~| a second default is defined on `default_param_test::bleh`
- //~| NOTE: ...that was applied to an unconstrained type variable here
-}
// n == m
let &x = &1isize as &T; //~ ERROR type `&T` cannot be dereferenced
let &&x = &(&1isize as &T); //~ ERROR type `&T` cannot be dereferenced
- let box x = box 1isize as Box<T>; //~ ERROR `T: std::marker::Sized` is not satisfied
+ let box x = box 1isize as Box<T>; //~ ERROR type `std::boxed::Box<T>` cannot be dereferenced
// n > m
let &&x = &1isize as &T;
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// gate-test-global_asm
+
+global_asm!(""); //~ ERROR `global_asm!` is not stable
+
+fn main() {}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that the MSP430 interrupt ABI cannot be used when msp430_interrupt
+// feature gate is not used.
+
+macro_rules! m { ($v:vis) => {} }
+//~^ ERROR :vis fragment specifier is experimental and subject to change
+
+fn main() {
+ m!(pub);
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::fmt::{Debug, Display};
+
+trait MyMarker {}
+
+impl<T: Display> MyMarker for T {}
+impl<T: Debug> MyMarker for T {}
+//~^ ERROR E0119
+
+fn main() {}
#![feature(core_intrinsics)]
-use std::intrinsics::{init, forget};
+use std::intrinsics::{init};
// Test that the `forget` and `init` intrinsics are really unsafe
pub fn main() {
let stuff = init::<isize>(); //~ ERROR call to unsafe function requires unsafe
- forget(stuff); //~ ERROR call to unsafe function requires unsafe
}
let &v = new();
//~^ ERROR type annotations needed [E0282]
//~| NOTE cannot infer type for `_`
+ //~| NOTE consider giving a type to pattern
}
let &v = new();
//~^ ERROR type annotations needed [E0282]
//~| NOTE cannot infer type for `_`
+ //~| NOTE consider giving a type to pattern
}
loop { match l {
&mut Sexpression::Num(ref mut n) => {},
&mut Sexpression::Cons(ref mut expr) => { //~ ERROR cannot borrow `l.0`
- //~| ERROR cannot borrow `l.0`
l = &mut **expr; //~ ERROR cannot assign to `l`
}
}}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Regression test for #30225, which was an ICE that would trigger as
+// a result of a poor interaction between trait result caching and
+// type inference. Specifically, at that time, unification could cause
+// unrelated type variables to become instantiated, if subtyping
+// relationships existed. These relationships are now propagated
+// through obligations and hence everything works out fine.
+
+trait Foo<U,V> : Sized {
+ fn foo(self, u: Option<U>, v: Option<V>) {}
+}
+
+struct A;
+struct B;
+
+impl Foo<A, B> for () {} // impl A
+impl Foo<u32, u32> for u32 {} // impl B, creating ambiguity
+
+fn toxic() {
+ // cache the resolution <() as Foo<$0,$1>> = impl A
+ let u = None;
+ let v = None;
+ Foo::foo((), u, v);
+}
+
+fn bomb() {
+ let mut u = None; // type is Option<$0>
+ let mut v = None; // type is Option<$1>
+ let mut x = None; // type is Option<$2>
+
+ Foo::foo(x.unwrap(),u,v); // register <$2 as Foo<$0, $1>>
+ u = v; // mark $0 and $1 in a subtype relationship
+ //~^ ERROR mismatched types
+ x = Some(()); // set $2 = (), allowing impl selection
+ // to proceed for <() as Foo<$0, $1>> = impl A.
+ // kaboom, this *used* to trigge an ICE
+}
+
+fn main() {}
fn main() {
let Box(a) = loop { };
//~^ ERROR expected tuple struct/variant, found struct `Box`
- //~| ERROR expected tuple struct/variant, found struct `Box`
// (The below is a trick to allow compiler to infer a type for
// variable `a` without attempting to ascribe a type to the
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Trait {}
+
+fn get_function<'a>() -> &'a Fn() -> Trait { panic!("") }
+
+fn main() {
+ let t : &Trait = &get_function()();
+ //~^ ERROR cannot move a value of type Trait + 'static
+}
// except according to those terms.
fn main() {
- let v = &[];
- let it = v.iter(); //~ ERROR type annotations needed [E0282]
- //~| NOTE cannot infer type for `T`
- //~| NOTE consider giving `it` a type
+ let v = &[]; //~ ERROR type annotations needed
+ //~| NOTE consider giving `v` a type
+ //~| NOTE cannot infer type for `_`
+ let it = v.iter();
}
// Should get errors for both 'Some' and 'None'
use std::option::Option::{Some, None};
//~^ ERROR unused imports: `None`, `Some`
-//~| ERROR unused imports: `None`, `Some`
use test::A; //~ ERROR unused import: `test::A`
// Be sure that if we just bring some methods into scope that they're also
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test for RFC 1268: we allow overlapping impls of marker traits,
+// that is, traits without items. In this case, a type `T` is
+// `MyMarker` if it is either `Debug` or `Display`. This test just
+// checks that we don't consider **all** types to be `MyMarker`. See
+// also the companion test in
+// `run-pass/overlap-permitted-for-marker-traits.rs`.
+
+#![feature(overlapping_marker_traits)]
+#![feature(optin_builtin_traits)]
+
+use std::fmt::{Debug, Display};
+
+trait Marker {}
+
+impl<T: Debug> Marker for T {}
+impl<T: Display> Marker for T {}
+
+fn is_marker<T: Marker>() { }
+
+struct NotDebugOrDisplay;
+
+fn main() {
+ // Debug && Display:
+ is_marker::<i32>();
+
+ // Debug && !Display:
+ is_marker::<Vec<i32>>();
+
+ // !Debug && !Display
+ is_marker::<NotDebugOrDisplay>(); //~ ERROR
+}
#![feature(specialization)]
-trait Foo {}
+trait Foo { fn foo() {} }
impl<T: Clone> Foo for T {}
impl<T> Foo for Vec<T> {} //~ ERROR E0119
-trait Bar {}
+trait Bar { fn bar() {} }
impl<T> Bar for (T, u8) {}
impl<T> Bar for (u8, T) {} //~ ERROR E0119
-trait Baz<U> {}
+trait Baz<U> { fn baz() {} }
impl<T> Baz<T> for u8 {}
impl<T> Baz<u8> for T {} //~ ERROR E0119
-trait Qux {}
+trait Qux { fn qux() {} }
impl<T: Clone> Qux for T {}
impl<T: Eq> Qux for T {} //~ ERROR E0119
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(untagged_unions)]
+#![allow(unused)]
+
+#[allow(unions_with_drop_fields)]
+union U {
+ x: ((Vec<u8>, Vec<u8>), Vec<u8>),
+ y: Box<Vec<u8>>,
+}
+
+unsafe fn parent_sibling_borrow() {
+ let mut u = U { x: ((Vec::new(), Vec::new()), Vec::new()) };
+ let a = &mut u.x.0;
+ let a = &u.y; //~ ERROR cannot borrow `u.y`
+}
+
+unsafe fn parent_sibling_move() {
+ let u = U { x: ((Vec::new(), Vec::new()), Vec::new()) };
+ let a = u.x.0;
+ let a = u.y; //~ ERROR use of moved value: `u.y`
+}
+
+unsafe fn grandparent_sibling_borrow() {
+ let mut u = U { x: ((Vec::new(), Vec::new()), Vec::new()) };
+ let a = &mut (u.x.0).0;
+ let a = &u.y; //~ ERROR cannot borrow `u.y`
+}
+
+unsafe fn grandparent_sibling_move() {
+ let u = U { x: ((Vec::new(), Vec::new()), Vec::new()) };
+ let a = (u.x.0).0;
+ let a = u.y; //~ ERROR use of moved value: `u.y`
+}
+
+unsafe fn deref_sibling_borrow() {
+ let mut u = U { y: Box::default() };
+ let a = &mut *u.y;
+ let a = &u.x; //~ ERROR cannot borrow `u` (via `u.x`)
+}
+
+unsafe fn deref_sibling_move() {
+ let u = U { x: ((Vec::new(), Vec::new()), Vec::new()) };
+ let a = *u.y;
+ let a = u.x; //~ ERROR use of moved value: `u.x`
+}
+
+
+fn main() {}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
-pub enum EnumVisibility { A }
+pub enum EnumVisibility {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ A
+}
#[rustc_metadata_dirty(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
enum EnumChangeNameCStyleVariant {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
Variant1,
+ #[rustc_metadata_clean(cfg="cfail3")]
Variant2Changed,
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
enum EnumChangeFieldTypeTupleStyleVariant {
- Variant1(u32, u64),
+ Variant1(u32,
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ u64),
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
enum EnumChangeFieldTypeStructStyleVariant {
Variant1,
- Variant2 { a: u32, b: u64 },
+ Variant2 {
+ a: u32,
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ b: u64
+ },
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
enum EnumChangeOrderTupleStyleVariant {
- Variant1(u64, u32),
+ Variant1(
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ u64,
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ u32),
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
enum EnumSwapUsageTypeParameters<A, B> {
- Variant1 { a: B },
- Variant2 { a: A },
+ #[rustc_metadata_clean(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ Variant1 {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ a: B
+ },
+ #[rustc_metadata_clean(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ Variant2 {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ a: A
+ },
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
enum EnumSwapUsageLifetimeParameters<'a, 'b> {
- Variant1 { a: &'b u32 },
- Variant2 { b: &'a u32 },
+ #[rustc_metadata_clean(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ Variant1 {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ a: &'b u32
+ },
+ #[rustc_metadata_clean(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ Variant2 {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ b: &'a u32
+ },
}
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
enum TupleStyle {
- Variant1(FieldType)
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ Variant1(
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ FieldType
+ )
}
}
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
enum StructStyle {
- Variant1 { a: FieldType }
+ #[rustc_metadata_clean(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ Variant1 {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ a: FieldType
+ }
}
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
extern {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
pub fn change_parameter_name(d: i64) -> i32;
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
extern {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
pub fn change_parameter_type(c: i32) -> i32;
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
extern {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
pub fn change_return_type(c: i32) -> i8;
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
extern {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
pub fn add_parameter(c: i32, d: i32) -> i32;
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
extern {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
pub fn add_return_type(c: i32) -> i32;
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
extern {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
pub fn make_function_variadic(c: i32, ...);
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
extern "rust-call" {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
pub fn change_calling_convention(c: i32);
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
extern {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
pub fn make_function_public(c: i32);
}
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
extern {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
pub fn indirectly_change_parameter_type(c: c_int);
}
}
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
extern {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
pub fn indirectly_change_return_type() -> c_int;
}
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")] // The type doesn't change, so metadata is the same
#[rustc_metadata_clean(cfg="cfail3")]
fn add_return_type() -> () {}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+// #[rustc_metadata_dirty(cfg="cfail2")] -- Unused lifetime params don't show up in the type?
#[rustc_metadata_clean(cfg="cfail3")]
fn lifetime_parameter<'a>() {}
#[cfg(cfail1)]
fn change_return_impl_trait() -> impl Clone {
- 0
+ 0u32
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")] // The actual type is the same, so: clean
#[rustc_metadata_clean(cfg="cfail3")]
fn change_return_impl_trait() -> impl Copy {
- 0
+ 0u32
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
impl Foo {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
impl Foo {
#[rustc_dirty(label="Hir", cfg="cfail2")]
impl Foo {
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
pub fn add_method_to_impl1(&self) { }
#[rustc_clean(label="Hir", cfg="cfail3")]
#[rustc_dirty(label="HirBody", cfg="cfail2")]
#[rustc_clean(label="HirBody", cfg="cfail3")]
- // At the moment we explicitly ignore argument names in metadata, since they
- // are not used in downstream crates (except in rustdoc)
- #[rustc_metadata_clean(cfg="cfail2")]
+ #[rustc_metadata_dirty(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
pub fn change_method_parameter_name(&self, b: i64) { }
}
#[rustc_clean(label="Hir", cfg="cfail3")]
#[rustc_dirty(label="HirBody", cfg="cfail2")]
#[rustc_clean(label="HirBody", cfg="cfail3")]
- // At the moment we explicitly ignore argument names in metadata, since they
- // are not used in downstream crates (except in rustdoc)
- #[rustc_metadata_clean(cfg="cfail2")]
+ #[rustc_metadata_dirty(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
pub fn change_method_parameter_order(&self, b: i64, a: i64) { }
}
impl Foo {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")] // Apparently unused lifetimes don't show up in the type.
#[rustc_metadata_clean(cfg="cfail3")]
pub fn add_lifetime_parameter_to_method<'a>(&self) { }
}
impl<T: 'static> Bar<T> {
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
pub fn add_lifetime_bound_to_impl_parameter(&self) { }
}
impl<T: Clone> Bar<T> {
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
pub fn add_trait_bound_to_impl_parameter(&self) { }
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
-struct TupleStructFieldType(u32);
+struct TupleStructFieldType(
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ u32
+);
// Tuple Struct Add Field ------------------------------------------------------
#[rustc_clean(label="Hir", cfg="cfail3")]
#[rustc_metadata_dirty(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
-struct TupleStructAddField(i32, u32);
+struct TupleStructAddField(
+ #[rustc_metadata_clean(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ i32,
+ #[rustc_metadata_clean(cfg="cfail3")]
+ u32
+);
// Tuple Struct Field Visibility -----------------------------------------------
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
-struct RecordStructFieldType { x: u64 }
+struct RecordStructFieldType {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ x: u64
+}
// Record Struct Field Name ----------------------------------------------------
#[rustc_clean(label="Hir", cfg="cfail3")]
#[rustc_metadata_dirty(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
-struct RecordStructAddField { x: f32, y: () }
+struct RecordStructAddField {
+ #[rustc_metadata_clean(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ x: f32,
+ #[rustc_metadata_clean(cfg="cfail3")]
+ y: () }
// Record Struct Field Visibility ----------------------------------------------
#[rustc_clean(label="Hir", cfg="cfail3")]
#[rustc_metadata_dirty(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
-struct RecordStructFieldVisibility { pub x: f32 }
+struct RecordStructFieldVisibility {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ pub x: f32
+}
// Add Lifetime Parameter ------------------------------------------------------
#[rustc_clean(label="Hir", cfg="cfail3")]
#[rustc_metadata_dirty(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
-struct AddLifetimeParameterBound<'a, 'b: 'a>(&'a f32, &'b f64);
+struct AddLifetimeParameterBound<'a, 'b: 'a>(
+ #[rustc_metadata_clean(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ &'a f32,
+ #[rustc_metadata_clean(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ &'b f64
+);
#[cfg(cfail1)]
struct AddLifetimeParameterBoundWhereClause<'a, 'b>(&'a f32, &'b f64);
#[rustc_clean(label="Hir", cfg="cfail3")]
#[rustc_metadata_dirty(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
-struct AddLifetimeParameterBoundWhereClause<'a, 'b>(&'a f32, &'b f64)
+struct AddLifetimeParameterBoundWhereClause<'a, 'b>(
+ #[rustc_metadata_clean(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ &'a f32,
+ #[rustc_metadata_clean(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ &'b f64)
where 'b: 'a;
#[rustc_clean(label="Hir", cfg="cfail3")]
#[rustc_metadata_dirty(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
-struct AddTypeParameter<T1, T2>(T1, T2);
+struct AddTypeParameter<T1, T2>(
+ // The field contains the parent's Generics, so it's dirty even though its
+ // type hasn't changed.
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ T1,
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ T2
+);
// Add Type Parameter Bound ----------------------------------------------------
#[rustc_clean(label="Hir", cfg="cfail3")]
#[rustc_metadata_dirty(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
-struct AddTypeParameterBound<T: Send>(T);
+struct AddTypeParameterBound<T: Send>(
+ #[rustc_metadata_clean(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ T
+);
#[cfg(cfail1)]
#[rustc_clean(label="Hir", cfg="cfail3")]
#[rustc_metadata_dirty(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
-struct AddTypeParameterBoundWhereClause<T>(T) where T: Sync;
+struct AddTypeParameterBoundWhereClause<T>(
+ #[rustc_metadata_clean(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ T
+) where T: Sync;
// Empty struct ----------------------------------------------------------------
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
pub struct Visibility;
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
- struct TupleStruct(FieldType);
+ struct TupleStruct(
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ FieldType
+ );
}
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
struct RecordStruct {
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
_x: FieldType
}
}
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitAddReturnType {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitChangeReturnType {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitAddParameterToMethod {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitChangeMethodParameterName {
// FIXME(#38501) This should preferably always be clean.
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitChangeMethodParameterType {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitChangeMethodParameterTypeRef {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitChangeMethodParametersOrder {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitAddMethodDefaultImplementation {
+ #[rustc_dirty(label="Hir", cfg="cfail2")]
+ #[rustc_clean(label="Hir", cfg="cfail3")]
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
fn method() { }
}
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitChangeModeSelfRefToMut {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitChangeModeSelfOwnToRef {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitAddUnsafeModifier {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitAddExternModifier {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitChangeExternCToRustIntrinsic {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitAddTypeParameterToMethod {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitAddLifetimeParameterToMethod {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")] // Unused lifetimes don't seem to show up in type?
#[rustc_metadata_clean(cfg="cfail3")]
fn method<'a>();
}
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitAddTraitBoundToMethodTypeParameter {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitAddBuiltinBoundToMethodTypeParameter {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitAddLifetimeBoundToMethodLifetimeParameter {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitAddSecondTraitBoundToMethodTypeParameter {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitAddSecondBuiltinBoundToMethodTypeParameter {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitAddSecondLifetimeBoundToMethodLifetimeParameter {
#[rustc_dirty(label="Hir", cfg="cfail2")]
// Add associated type ------------------------------------------------------------
#[cfg(cfail1)]
trait TraitAddAssociatedType {
- fn mathod();
+
+ #[rustc_dirty(label="Hir", cfg="cfail2")]
+ #[rustc_clean(label="Hir", cfg="cfail3")]
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ fn method();
}
#[cfg(not(cfail1))]
trait TraitAddAssociatedType {
type Associated;
- fn mathod();
+ fn method();
}
trait TraitAddTraitBoundToAssociatedType {
type Associated;
- fn mathod();
+ fn method();
}
+
+// Apparently the type bound contributes to the predicates of the trait, but
+// does not change the associated item itself.
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
trait TraitAddTraitBoundToAssociatedType {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
type Associated: ReferencedTrait0;
- fn mathod();
+ fn method();
}
trait TraitAddLifetimeBoundToAssociatedType<'a> {
type Associated;
- fn mathod();
+ fn method();
}
#[cfg(not(cfail1))]
trait TraitAddLifetimeBoundToAssociatedType<'a> {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
type Associated: 'a;
- fn mathod();
+ fn method();
}
trait TraitAddDefaultToAssociatedType {
type Associated;
- fn mathod();
+ fn method();
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitAddDefaultToAssociatedType {
+ #[rustc_dirty(label="Hir", cfg="cfail2")]
+ #[rustc_clean(label="Hir", cfg="cfail3")]
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
type Associated = ReferenceType0;
- fn mathod();
+ fn method();
}
// Add associated constant --------------------------------------------------------
#[cfg(cfail1)]
trait TraitAddAssociatedConstant {
- fn mathod();
+ fn method();
}
#[cfg(not(cfail1))]
trait TraitAddAssociatedConstant {
const Value: u32;
- fn mathod();
+ fn method();
}
trait TraitAddInitializerToAssociatedConstant {
const Value: u32;
- fn mathod();
+ fn method();
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitAddInitializerToAssociatedConstant {
+ #[rustc_dirty(label="Hir", cfg="cfail2")]
+ #[rustc_clean(label="Hir", cfg="cfail3")]
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
const Value: u32 = 1;
- fn mathod();
+ #[rustc_clean(label="Hir", cfg="cfail2")]
+ #[rustc_clean(label="Hir", cfg="cfail3")]
+ #[rustc_metadata_clean(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ fn method();
}
trait TraitChangeTypeOfAssociatedConstant {
const Value: u32;
- fn mathod();
+ fn method();
}
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitChangeTypeOfAssociatedConstant {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
const Value: f64;
- fn mathod();
+ #[rustc_clean(label="Hir", cfg="cfail2")]
+ #[rustc_clean(label="Hir", cfg="cfail3")]
+ #[rustc_metadata_clean(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ fn method();
}
trait TraitAddSecondBuiltinBoundToTypeParameterOfTraitWhere<T> where T: Send + Sync { }
-
-// EDIT: Some more cases ----------------------------------------------------------
-
// Change return type of method indirectly by modifying a use statement------------
mod change_return_type_of_method_indirectly_use {
#[cfg(cfail1)]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitChangeReturnType {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitChangeArgType {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitChangeBoundOfMethodTypeParameter {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
trait TraitChangeBoundOfMethodTypeParameterWhere {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
#[inline]
fn method_name() {
- ()
+ panic!()
}
}
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
impl ChangeMethodSelfnessTrait for Foo {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
impl RemoveMethodSelfnessTrait for Foo {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
#[rustc_metadata_dirty(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
- fn method_name() {
- ()
- }
+ fn method_name() {}
}
// Change Method Selfmutness -----------------------------------------------------------
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
impl ChangeMethodSelfmutnessTrait for Foo {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
#[rustc_metadata_dirty(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
- fn method_name(&mut self) {
- ()
- }
+ fn method_name(&mut self) {}
}
// Change item kind -----------------------------------------------------------
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
pub trait ChangeHasValueTrait {
+ #[rustc_dirty(label="Hir", cfg="cfail2")]
+ #[rustc_clean(label="Hir", cfg="cfail3")]
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
fn method_name() { }
}
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
impl ChangeHasValueTrait for Foo {
fn method_name() { }
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
impl AddDefaultTrait for Foo {
+ #[rustc_dirty(label="Hir", cfg="cfail2")]
+ #[rustc_clean(label="Hir", cfg="cfail3")]
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
default fn method_name() { }
}
-// Remove default
-
-pub trait RemoveDefaultTrait {
- fn method_name();
-}
-
-#[cfg(cfail1)]
-impl RemoveDefaultTrait for Foo {
- default fn method_name() { }
-}
-
-#[cfg(not(cfail1))]
-#[rustc_dirty(label="Hir", cfg="cfail2")]
-#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
-#[rustc_metadata_clean(cfg="cfail3")]
-impl RemoveDefaultTrait for Foo {
- fn method_name() { }
-}
-
// Add arguments
#[cfg(cfail1)]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
impl AddArgumentTrait for Foo {
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[cfg(not(cfail1))]
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
-#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
impl ChangeArgumentTypeTrait for Foo {
#[rustc_dirty(label="Hir", cfg="cfail2")]
impl<T: 'static> AddLifetimeBoundToImplParameter for T {
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
fn id(self) -> Self { self }
}
impl<T: Clone> AddTraitBoundToImplParameter for T {
#[rustc_clean(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
- #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
fn id(self) -> Self { self }
}
}
}
-struct _Struct {
- #[rustc_metadata_dirty(cfg="cfail2")]
- //[cfail2]~^ ERROR found unchecked #[rustc_dirty]/#[rustc_clean] attribute
- _field1: i32,
-
- #[rustc_metadata_clean(cfg="cfail2")]
- //[cfail2]~^ ERROR found unchecked #[rustc_dirty]/#[rustc_clean] attribute
- _field2: i32,
-}
-
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// compile-flags: -Z parse-only
-
-fn main () {
- (1, (2, 3)).1.1; //~ ERROR unexpected token
- //~^ HELP try parenthesizing the first index
- //~| SUGGESTION ((1, (2, 3)).1).1
-}
int32_t e;
};
+struct FloatPoint {
+ double x;
+ double y;
+};
+
// System V x86_64 ABI:
// a, b, c, d, e should be in registers
// s should be byval pointer
return s;
}
+
+// System V x86_64 ABI:
+// p should be in registers
+// return should be in registers
+//
+// Win64 ABI:
+// p should be a byval pointer
+// return should be in a hidden sret pointer
+struct FloatPoint float_point(struct FloatPoint p) {
+ assert(p.x == 5.);
+ assert(p.y == -3.);
+
+ return p;
+}
e: i32
}
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[repr(C)]
+struct FloatPoint {
+ x: f64,
+ y: f64
+}
+
#[link(name = "test", kind = "static")]
extern {
fn byval_rect(a: i32, b: i32, c: i32, d: i32, e: i32, s: Rect);
fn sret_split_struct(a: i32, b: i32, s: Rect) -> BiggerRect;
fn huge_struct(s: Huge) -> Huge;
+
+ fn float_point(p: FloatPoint) -> FloatPoint;
}
fn main() {
let t = BiggerRect { s: s, a: 27834, b: 7657 };
let u = FloatRect { a: 3489, b: 3490, c: 8. };
let v = Huge { a: 5647, b: 5648, c: 5649, d: 5650, e: 5651 };
+ let p = FloatPoint { x: 5., y: -3. };
unsafe {
byval_rect(1, 2, 3, 4, 5, s);
assert_eq!(split_ret_byval_struct(1, 2, s), s);
assert_eq!(sret_byval_struct(1, 2, 3, 4, s), t);
assert_eq!(sret_split_struct(1, 2, s), t);
+ assert_eq!(float_point(p), p);
}
}
}
}
+#[no_mangle]
+pub extern fn __rust_allocate_zeroed(size: usize, _align: usize) -> *mut u8 {
+ unsafe { libc::calloc(size as libc::size_t, 1) as *mut u8 }
+}
+
#[no_mangle]
pub extern fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize) {
unsafe {
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-//
-
-#![feature(default_type_parameter_fallback)]
-
-use std::marker::PhantomData;
-
-trait Id {
- type This;
-}
-
-impl<A> Id for A {
- type This = A;
-}
-
-struct Foo<X: Default = usize, Y = <X as Id>::This> {
- data: PhantomData<(X, Y)>
-}
-
-impl<X: Default, Y> Foo<X, Y> {
- fn new() -> Foo<X, Y> {
- Foo { data: PhantomData }
- }
-}
-
-fn main() {
- let foo = Foo::new();
-}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-//
-
-#![feature(default_type_parameter_fallback)]
-use std::marker::PhantomData;
-
-struct Foo<T,U=T> { t: T, data: PhantomData<U> }
-
-fn main() {
- let foo = Foo { t: 'a', data: PhantomData };
-}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(default_type_parameter_fallback)]
-
-struct Foo;
-
-impl Foo {
- fn method<A:Default=String>(&self) -> A {
- A::default()
- }
-}
-
-fn main() {
- let f = Foo.method();
- println!("{}", f);
-}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(default_type_parameter_fallback)]
-
-struct Foo<A>(A);
-
-impl<A:Default=i32> Foo<A> {
- fn new() -> Foo<A> {
- Foo(A::default())
- }
-}
-
-fn main() {
- let foo = Foo::new();
-}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-//
-
-#![feature(default_type_parameter_fallback)]
-
-use std::marker::PhantomData;
-
-pub struct DeterministicHasher;
-pub struct RandomHasher;
-
-
-pub struct MyHashMap<K, V, H=DeterministicHasher> {
- data: PhantomData<(K, V, H)>
-}
-
-impl<K, V, H> MyHashMap<K, V, H> {
- fn new() -> MyHashMap<K, V, H> {
- MyHashMap { data: PhantomData }
- }
-}
-
-mod mystd {
- use super::{MyHashMap, RandomHasher};
- pub type HashMap<K, V, H=RandomHasher> = MyHashMap<K, V, H>;
-}
-
-fn try_me<H>(hash_map: mystd::HashMap<i32, i32, H>) {}
-
-fn main() {
- let hash_map = mystd::HashMap::new();
- try_me(hash_map);
-}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(default_type_parameter_fallback)]
-
-// Another example from the RFC
-trait Foo { }
-trait Bar { }
-
-impl<T:Bar=usize> Foo for Vec<T> {}
-impl Bar for usize {}
-
-fn takes_foo<F:Foo>(f: F) {}
-
-fn main() {
- let x = Vec::new(); // x: Vec<$0>
- takes_foo(x); // adds oblig Vec<$0> : Foo
-}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(default_type_parameter_fallback)]
-
-// An example from the RFC
-trait Foo { fn takes_foo(&self); }
-trait Bar { }
-
-impl<T:Bar=usize> Foo for Vec<T> {
- fn takes_foo(&self) {}
-}
-
-impl Bar for usize {}
-
-fn main() {
- let x = Vec::new(); // x: Vec<$0>
- x.takes_foo(); // adds oblig Vec<$0> : Foo
-}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(default_type_parameter_fallback)]
-
-use std::collections::HashMap;
-
-type IntMap<K=usize> = HashMap<K, usize>;
-
-fn main() {
- let x = IntMap::new();
-}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(global_asm)]
+
+#[cfg(target_arch = "x86")]
+global_asm!("");
+
+#[cfg(target_arch = "x86_64")]
+global_asm!("");
+
+#[cfg(target_arch = "arm")]
+global_asm!("");
+
+#[cfg(target_arch = "aarch64")]
+global_asm!("");
+
+#[cfg(target_arch = "mips")]
+global_asm!("");
+
+fn main() {}
assert_eq!(l.checked_sub(l), Some(0));
assert_eq!(b(1u128).checked_shl(b(127)), Some(1 << 127));
assert_eq!(o.checked_shl(b(128)), None);
+
+ // https://github.com/rust-lang/rust/issues/41228
+ assert_eq!(b(-87559967289969187895646876466835277875_i128) /
+ b(84285771033834995895337664386045050880_i128),
+ -1i128);
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Regression test for #40951.
+
+const FOO: [&'static str; 1] = ["foo"];
+
+fn find<T: PartialEq>(t: &[T], element: &T) { }
+
+fn main() {
+ let x = format!("hi");
+ find(&FOO, &&*x);
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+enum A {
+ A1,
+ A2,
+ A3,
+}
+
+enum B {
+ B1(String, String),
+ B2(String, String),
+}
+
+fn main() {
+ let a = A::A1;
+ loop {
+ let _ctor = match a {
+ A::A3 => break,
+ A::A1 => B::B1,
+ A::A2 => B::B2,
+ };
+ break;
+ }
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Foo;
+
+impl Foo {
+ fn bar(&mut self) -> bool { true }
+}
+
+fn error(foo: &mut Foo) {
+ if let Some(_) = Some(true) {
+ } else if foo.bar() {}
+}
+
+fn ok(foo: &mut Foo) {
+ if let Some(_) = Some(true) {
+ } else {
+ if foo.bar() {}
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code, unused_imports)]
+#![feature(macro_vis_matcher)]
+
+/**
+Ensure that `:vis` matches can be captured in existing positions, and passed
+through without the need for reparse tricks.
+*/
+// Each rule captures a `$vis:vis` fragment and forwards it unchanged into
+// the corresponding item position. NOTE(review): the `enum` rule expands
+// to a `struct` of the same name — presumably intentional, since only the
+// visibility passthrough (not the item kind) is under test; confirm.
+macro_rules! vis_passthru {
+    ($vis:vis const $name:ident: $ty:ty = $e:expr;) => { $vis const $name: $ty = $e; };
+    ($vis:vis enum $name:ident {}) => { $vis struct $name {} };
+    ($vis:vis extern "C" fn $name:ident() {}) => { $vis extern "C" fn $name() {} };
+    ($vis:vis fn $name:ident() {}) => { $vis fn $name() {} };
+    ($vis:vis mod $name:ident {}) => { $vis mod $name {} };
+    ($vis:vis static $name:ident: $ty:ty = $e:expr;) => { $vis static $name: $ty = $e; };
+    ($vis:vis struct $name:ident;) => { $vis struct $name; };
+    ($vis:vis trait $name:ident {}) => { $vis trait $name {} };
+    ($vis:vis type $name:ident = $ty:ty;) => { $vis type $name = $ty; };
+    ($vis:vis use $path:ident as $name:ident;) => { $vis use self::$path as $name; };
+}
+
+mod with_pub {
+ vis_passthru! { pub const A: i32 = 0; }
+ vis_passthru! { pub enum B {} }
+ vis_passthru! { pub extern "C" fn c() {} }
+ vis_passthru! { pub mod d {} }
+ vis_passthru! { pub static E: i32 = 0; }
+ vis_passthru! { pub struct F; }
+ vis_passthru! { pub trait G {} }
+ vis_passthru! { pub type H = i32; }
+ vis_passthru! { pub use A as I; }
+}
+
+mod without_pub {
+ vis_passthru! { const A: i32 = 0; }
+ vis_passthru! { enum B {} }
+ vis_passthru! { extern "C" fn c() {} }
+ vis_passthru! { mod d {} }
+ vis_passthru! { static E: i32 = 0; }
+ vis_passthru! { struct F; }
+ vis_passthru! { trait G {} }
+ vis_passthru! { type H = i32; }
+ vis_passthru! { use A as I; }
+}
+
+mod with_pub_restricted {
+ vis_passthru! { pub(crate) const A: i32 = 0; }
+ vis_passthru! { pub(crate) enum B {} }
+ vis_passthru! { pub(crate) extern "C" fn c() {} }
+ vis_passthru! { pub(crate) mod d {} }
+ vis_passthru! { pub(crate) static E: i32 = 0; }
+ vis_passthru! { pub(crate) struct F; }
+ vis_passthru! { pub(crate) trait G {} }
+ vis_passthru! { pub(crate) type H = i32; }
+ vis_passthru! { pub(crate) use A as I; }
+}
+
+mod garden {
+ mod with_pub_restricted_path {
+ vis_passthru! { pub(in garden) const A: i32 = 0; }
+ vis_passthru! { pub(in garden) enum B {} }
+ vis_passthru! { pub(in garden) extern "C" fn c() {} }
+ vis_passthru! { pub(in garden) mod d {} }
+ vis_passthru! { pub(in garden) static E: i32 = 0; }
+ vis_passthru! { pub(in garden) struct F; }
+ vis_passthru! { pub(in garden) trait G {} }
+ vis_passthru! { pub(in garden) type H = i32; }
+ vis_passthru! { pub(in garden) use A as I; }
+ }
+}
+
+/*
+Ensure that the `:vis` matcher works in a more complex situation: parsing a
+struct definition.
+*/
+// The two entry rules distinguish brace structs from tuple structs, then
+// dispatch to internal rules. The `@parse_fields` / `@parse_tuple` tokens
+// are private markers that keep the internal rules from matching user
+// input directly — a common macro_rules! dispatch idiom.
+macro_rules! vis_parse_struct {
+    ($(#[$($attrs:tt)*])* $vis:vis struct $name:ident {$($body:tt)*}) => {
+        vis_parse_struct! { @parse_fields $(#[$($attrs)*])*, $vis, $name, $($body)* }
+    };
+
+    ($(#[$($attrs:tt)*])* $vis:vis struct $name:ident ($($body:tt)*);) => {
+        vis_parse_struct! { @parse_tuple $(#[$($attrs)*])*, $vis, $name, $($body)* }
+    };
+
+    // Re-emit a brace struct; per-field `$fvis:vis` exercises `:vis` in
+    // field position (matches an empty visibility too).
+    (@parse_fields
+     $(#[$attrs:meta])*, $vis:vis, $name:ident, $($fvis:vis $fname:ident: $fty:ty),* $(,)*) => {
+        $(#[$attrs])* $vis struct $name { $($fvis $fname: $fty,)* }
+    };
+
+    // Same for tuple structs.
+    (@parse_tuple
+     $(#[$attrs:meta])*, $vis:vis, $name:ident, $($fvis:vis $fty:ty),* $(,)*) => {
+        $(#[$attrs])* $vis struct $name ( $($fvis $fty,)* );
+    };
+}
+
+mod test_struct {
+ vis_parse_struct! { pub(crate) struct A { pub a: i32, b: i32, pub(crate) c: i32 } }
+ vis_parse_struct! { pub struct B { a: i32, pub(crate) b: i32, pub c: i32 } }
+ vis_parse_struct! { struct C { pub(crate) a: i32, pub b: i32, c: i32 } }
+
+ vis_parse_struct! { pub(crate) struct D (pub i32, i32, pub(crate) i32); }
+ vis_parse_struct! { pub struct E (i32, pub(crate) i32, pub i32); }
+ vis_parse_struct! { struct F (pub(crate) i32, pub i32, i32); }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_name="foo"]
+
+use std::mem::size_of;
+
+// compile-flags: -Z fuel=foo=0
+// NOTE(review): compiletest directives usually appear before any items;
+// confirm this placement after the `use` is still picked up.
+
+// With zero optimization fuel for crate `foo`, no struct may have its
+// layout optimized, so both keep the naive declaration-order size of
+// 6 bytes (u8 + pad + u16 + u8 + pad).
+struct S1(u8, u16, u8);
+struct S2(u8, u16, u8);
+
+fn main() {
+    assert_eq!(size_of::<S1>(), 6);
+    assert_eq!(size_of::<S2>(), 6);
+}
+
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_name="foo"]
+
+use std::mem::size_of;
+
+// compile-flags: -Z fuel=foo=1
+
+// With exactly one unit of optimization fuel, exactly one of the two
+// structs gets its fields reordered (packed size 4); the other stays at
+// the unoptimized size 6. The test counts how many ended up at 4 bytes.
+struct S1(u8, u16, u8);
+struct S2(u8, u16, u8);
+
+fn main() {
+    let optimized = (size_of::<S1>() == 4) as usize
+        +(size_of::<S2>() == 4) as usize;
+    assert_eq!(optimized, 1);
+}
+
+
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Checks that `overlapping_marker_traits` permits a blanket impl to
+// overlap a more specific impl (`Vec<T>`) for a trait with no items.
+// NOTE(review): `specialization` is also enabled — presumably to allow
+// the more specific impl to coexist with the blanket one; confirm which
+// feature the test actually depends on.
+#![feature(overlapping_marker_traits)]
+#![feature(specialization)]
+
+trait MyMarker {}
+
+// Overlapping impls: every `T` is covered by the first, and `Vec<T>` is
+// covered by both.
+impl<T> MyMarker for T {}
+impl<T> MyMarker for Vec<T> {}
+
+// Identity function gated on the marker bound.
+fn foo<T: MyMarker>(t: T) -> T {
+    t
+}
+
+fn main() {
+    assert_eq!(1, foo(1));
+    assert_eq!(2.0, foo(2.0));
+    assert_eq!(vec![1], foo(vec![1]));
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// `optin_builtin_traits` enables explicit negative impls of auto traits;
+// `overlapping_marker_traits` makes the duplicate below legal.
+#![feature(overlapping_marker_traits)]
+#![feature(optin_builtin_traits)]
+
+// Overlapping negative impls for `MyStruct` are permitted:
+struct MyStruct;
+impl !Send for MyStruct {}
+impl !Send for MyStruct {}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Tests for RFC 1268: we allow overlapping impls of marker traits,
+// that is, traits without items. In this case, a type `T` is
+// `MyMarker` if it is either `Debug` or `Display`.
+
+#![feature(overlapping_marker_traits)]
+#![feature(optin_builtin_traits)]
+
+use std::fmt::{Debug, Display};
+
+trait MyMarker {}
+
+// These two blanket impls overlap on any type that is both `Debug` and
+// `Display` (e.g. the integers and floats used below).
+impl<T: Debug> MyMarker for T {}
+impl<T: Display> MyMarker for T {}
+
+// Identity function gated on the marker bound.
+fn foo<T: MyMarker>(t: T) -> T {
+    t
+}
+
+fn main() {
+    // Debug && Display:
+    assert_eq!(1, foo(1));
+    assert_eq!(2.0, foo(2.0));
+
+    // Debug && !Display:
+    assert_eq!(vec![1], foo(vec![1]));
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// NOTE(review): `naked_functions` appears unused in this file — confirm
+// whether it is needed.
+#![feature(global_asm)]
+#![feature(naked_functions)]
+
+// Define a trivial function (`ret` only) in file-scope assembly. Both
+// `foo` and `_foo` labels are emitted — presumably so the symbol resolves
+// on targets with and without a leading-underscore C mangling; confirm.
+#[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
+global_asm!(r#"
+    .global foo
+    .global _foo
+foo:
+_foo:
+    ret
+"#);
+
+// Declare the asm-defined symbol so Rust code can call it.
+extern {
+    fn foo();
+}
+
+#[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
+fn main() { unsafe { foo(); } }
+
+// On other architectures the asm is not emitted, so do nothing.
+#[cfg(not(any(target_arch = "x86_64", target_arch = "x86")))]
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test a scenario where we generate a constraint like `?1 <: &?2`.
+// In such a case, it is important that we instantiate `?1` with `&?3`
+// where `?3 <: ?2`, and not with `&?2`. This is a regression test for
+// #18653. The important thing is that we build.
+
+use std::cell::RefCell;
+
+enum Wrap<A> {
+    WrapSome(A),
+    WrapNone
+}
+
+use Wrap::*;
+
+struct T;
+struct U;
+
+// Accessor trait over an unsized target type.
+trait Get<T: ?Sized> {
+    fn get(&self) -> &T;
+}
+
+// Returns a reference to a `static` as a `MyShow` trait object.
+// NOTE(review): lowercase `static x` triggers the non_upper_case_globals
+// warning; presumably tolerated in this test.
+impl Get<MyShow + 'static> for Wrap<T> {
+    fn get(&self) -> &(MyShow + 'static) {
+        static x: usize = 42;
+        &x
+    }
+}
+
+impl Get<usize> for Wrap<U> {
+    fn get(&self) -> &usize {
+        static x: usize = 55;
+        &x
+    }
+}
+
+trait MyShow { fn dummy(&self) { } }
+impl<'a> MyShow for &'a (MyShow + 'a) { }
+impl MyShow for usize { }
+fn constrain<'a>(rc: RefCell<&'a (MyShow + 'a)>) { }
+
+fn main() {
+    // `_` is inferred as `T` from the later `WrapSome(T)` assignment;
+    // the block in between produces the subtyping constraint described
+    // in the header comment.
+    let mut collection: Wrap<_> = WrapNone;
+
+    {
+        let __arg0 = Get::get(&collection);
+        let __args_cell = RefCell::new(__arg0);
+        constrain(__args_cell);
+    }
+    collection = WrapSome(T);
+}
a([u16; 0], u8), b
}
+// Field reordering should pack (u8, u16, u8) into 4 bytes
+// (asserted as `size_of::<ReorderedStruct>() == 4` below).
+struct ReorderedStruct {
+    a: u8,
+    b: u16,
+    c: u8
+}
+
+// Same payload in each variant; with the discriminant folded in, the
+// enum is expected to be 6 bytes (asserted below).
+enum ReorderedEnum {
+    A(u8, u16, u8),
+    B(u8, u16, u8),
+}
+
+
pub fn main() {
assert_eq!(size_of::<u8>(), 1 as usize);
assert_eq!(size_of::<u32>(), 4 as usize);
assert_eq!(size_of::<e1>(), 8 as usize);
assert_eq!(size_of::<e2>(), 8 as usize);
assert_eq!(size_of::<e3>(), 4 as usize);
+ assert_eq!(size_of::<ReorderedStruct>(), 4);
+ assert_eq!(size_of::<ReorderedEnum>(), 6);
}
assert_eq!(o.checked_sub(b(18)), None);
assert_eq!(b(1u128).checked_shl(b(127)), Some(1 << 127));
assert_eq!(o.checked_shl(b(128)), None);
+
+ // Test cases for all udivmodti4 branches.
+ // case "0X/0X"
+ assert_eq!(b(0x69545bd57727c050_u128) /
+ b(0x3283527a3350d88c_u128),
+ 2u128);
+ // case "0X/KX"
+ assert_eq!(b(0x0_8003c9c50b473ae6_u128) /
+ b(0x1_283e8838c30fa8f4_u128),
+ 0u128);
+ // case "K0/K0"
+ assert_eq!(b(0xc43f42a207978720_u128 << 64) /
+ b(0x098e62b74c23cf1a_u128 << 64),
+ 20u128);
+ // case "KK/K0" for power-of-two D.
+ assert_eq!(b(0xa9008fb6c9d81e42_0e25730562a601c8_u128) /
+ b(1u128 << 120),
+ 169u128);
+ // case "KK/K0" with N >= D (https://github.com/rust-lang/rust/issues/41228).
+ assert_eq!(b(0xe4d26e59f0640328_06da5b06efe83a41_u128) /
+ b(0x330fcb030ea4447c_u128 << 64),
+ 4u128);
+ assert_eq!(b(3u128 << 64 | 1) /
+ b(3u128 << 64),
+ 1u128);
+ // case "KK/K0" with N < D.
+ assert_eq!(b(0x6655c9fb66ca2884_e2d1dfd470158c62_u128) /
+ b(0xb35b667cab7e355b_u128 << 64),
+ 0u128);
+ // case "KX/0K" for power-of-two D.
+ assert_eq!(b(0x3e49dd84feb2df59_7b2f97d93a253969_u128) /
+ b(1u128 << 4),
+ 0x03e49dd84feb2df5_97b2f97d93a25396_u128);
+ // case "KX/0K" in general.
+ assert_eq!(b(0x299692b3a1dae5bd_6162e6f489d2620e_u128) /
+ b(0x900b6f027571d6f7_u128),
+ 0x49e95f54b0442578_u128);
+ // case "KX/KK" with N >= D.
+ assert_eq!(b(0xc7b889180b67b07d_bc1a3c88783d35b5_u128) /
+ b(0x1d7e69f53160b9e2_60074771e852f244_u128),
+ 6u128);
+ // case "KX/KK" with N < D.
+ assert_eq!(b(0x679289ac23bb334f_36144401cf882172_u128) /
+ b(0x7b0b271b64865f05_f54a7b72746c062f_u128),
+ 0u128);
}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_name = "foo"]
+
+// ignore-tidy-linelength
+
+// Checks that rustdoc renders a type alias to an associated-type
+// projection (`<Item as AsExpression<Type>>::Expression`) verbatim,
+// per the `@has` assertions below.
+pub trait Expression {
+    type SqlType;
+}
+
+pub trait AsExpression<T> {
+    type Expression: Expression<SqlType = T>;
+    fn as_expression(self) -> Self::Expression;
+}
+
+// @has foo/type.AsExprOf.html
+// @has - '//*[@class="rust typedef"]' 'type AsExprOf<Item, Type> = <Item as AsExpression<Type>>::Expression;'
+pub type AsExprOf<Item, Type> = <Item as AsExpression<Type>>::Expression;
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// A glob re-export of the parent module's contents — which includes
+// `innermod` itself, making the re-export cyclic.
+// NOTE(review): presumably the aux crate for the issue-40936 rustdoc
+// test that follows in this series — confirm.
+pub mod outermod {
+    pub mod innermod {
+        pub use super::*;
+    }
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:issue-40936.rs
+// build-aux-docs
+
+#![crate_name = "foo"]
+
+// Regression test for issue #40936: documenting a crate that links the
+// aux crate (which contains a cyclic glob re-export) must succeed.
+// NOTE(review): exact failure mode (hang vs. crash) not visible here.
+extern crate issue_40936;
16 | x.push(y);
| ^ lifetime mismatch
|
- = note: expected type `Ref<'a, i32>`
- found type `Ref<'_, i32>`
+ = note: expected type `Ref<'a, _>`
+ found type `Ref<'_, _>`
note: the anonymous lifetime #2 defined on the body at 15:51...
--> $DIR/ex2a-push-one-existing-name.rs:15:52
|
16 | x.push(y);
| ^ lifetime mismatch
|
- = note: expected type `Ref<'_, i32>`
- found type `Ref<'_, i32>`
+ = note: expected type `Ref<'_, _>`
+ found type `Ref<'_, _>`
note: the anonymous lifetime #3 defined on the body at 15:43...
--> $DIR/ex2b-push-no-existing-names.rs:15:44
|
17 | | x.push(z);
18 | | }
| |_^ ...ending here
-note: ...so that expression is assignable (expected Ref<'b, i32>, found Ref<'_, i32>)
+note: ...so that expression is assignable (expected Ref<'b, _>, found Ref<'_, _>)
--> $DIR/ex2c-push-inference-variable.rs:17:12
|
17 | x.push(z);
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+enum Bar {
+ Qux,
+ Zar,
+}
+
+struct Foo {
+ bar: usize,
+}
+
+struct X<T1, T2> {
+ x: T1,
+ y: T2,
+}
+
+fn a() -> Foo {
+ Some(Foo { bar: 1 })
+}
+
+fn a2() -> Foo {
+ Ok(Foo { bar: 1})
+}
+
+fn b() -> Option<Foo> {
+ Foo { bar: 1 }
+}
+
+fn c() -> Result<Foo, Bar> {
+ Foo { bar: 1 }
+}
+
+fn d() -> X<X<String, String>, String> {
+ X {
+ x: X {
+ x: "".to_string(),
+ y: 2,
+ },
+ y: 3,
+ }
+}
+
+fn e() -> X<X<String, String>, String> {
+ X {
+ x: X {
+ x: "".to_string(),
+ y: 2,
+ },
+ y: "".to_string(),
+ }
+}
+
+fn main() {}
--- /dev/null
+error[E0308]: mismatched types
+ --> $DIR/abridged.rs:26:5
+ |
+26 | Some(Foo { bar: 1 })
+ | ^^^^^^^^^^^^^^^^^^^^ expected struct `Foo`, found enum `std::option::Option`
+ |
+ = note: expected type `Foo`
+ found type `std::option::Option<Foo>`
+
+error[E0308]: mismatched types
+ --> $DIR/abridged.rs:30:5
+ |
+30 | Ok(Foo { bar: 1})
+ | ^^^^^^^^^^^^^^^^^ expected struct `Foo`, found enum `std::result::Result`
+ |
+ = note: expected type `Foo`
+ found type `std::result::Result<Foo, _>`
+
+error[E0308]: mismatched types
+ --> $DIR/abridged.rs:34:5
+ |
+34 | Foo { bar: 1 }
+ | ^^^^^^^^^^^^^^ expected enum `std::option::Option`, found struct `Foo`
+ |
+ = note: expected type `std::option::Option<Foo>`
+ found type `Foo`
+
+error[E0308]: mismatched types
+ --> $DIR/abridged.rs:38:5
+ |
+38 | Foo { bar: 1 }
+ | ^^^^^^^^^^^^^^ expected enum `std::result::Result`, found struct `Foo`
+ |
+ = note: expected type `std::result::Result<Foo, Bar>`
+ found type `Foo`
+
+error[E0308]: mismatched types
+ --> $DIR/abridged.rs:42:5
+ |
+42 | X {
+ | _____^ starting here...
+43 | | x: X {
+44 | | x: "".to_string(),
+45 | | y: 2,
+46 | | },
+47 | | y: 3,
+48 | | }
+ | |_____^ ...ending here: expected struct `std::string::String`, found integral variable
+ |
+ = note: expected type `X<X<_, std::string::String>, std::string::String>`
+ found type `X<X<_, {integer}>, {integer}>`
+
+error[E0308]: mismatched types
+ --> $DIR/abridged.rs:52:5
+ |
+52 | X {
+ | _____^ starting here...
+53 | | x: X {
+54 | | x: "".to_string(),
+55 | | y: 2,
+56 | | },
+57 | | y: "".to_string(),
+58 | | }
+ | |_____^ ...ending here: expected struct `std::string::String`, found integral variable
+ |
+ = note: expected type `X<X<_, std::string::String>, _>`
+ found type `X<X<_, {integer}>, _>`
+
+error: aborting due to 6 previous errors
+
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_name="foo"]
+#![allow(dead_code)]
+
+// compile-flags: -Z print-fuel=foo
+
+// Three layout-optimizable structs, so the compiler should report three
+// units of fuel used for crate `foo` (the companion expected-output file
+// reads "Fuel used by foo: 3").
+struct S1(u8, u16, u8);
+struct S2(u8, u16, u8);
+struct S3(u8, u16, u8);
+
+fn main() {
+}
--- /dev/null
+Fuel used by foo: 3
-print-type-size type: `IndirectNonZero<u32>`: 20 bytes, alignment: 4 bytes
-print-type-size field `.pre`: 1 bytes
-print-type-size padding: 3 bytes
-print-type-size field `.nested`: 12 bytes, alignment: 4 bytes
+print-type-size type: `IndirectNonZero<u32>`: 12 bytes, alignment: 4 bytes
+print-type-size field `.nested`: 8 bytes
print-type-size field `.post`: 2 bytes
-print-type-size end padding: 2 bytes
-print-type-size type: `MyOption<IndirectNonZero<u32>>`: 20 bytes, alignment: 4 bytes
-print-type-size variant `Some`: 20 bytes
-print-type-size field `.0`: 20 bytes
-print-type-size type: `EmbeddedDiscr`: 12 bytes, alignment: 4 bytes
-print-type-size variant `Record`: 10 bytes
-print-type-size field `.pre`: 1 bytes
-print-type-size padding: 3 bytes
-print-type-size field `.val`: 4 bytes, alignment: 4 bytes
-print-type-size field `.post`: 2 bytes
-print-type-size end padding: 2 bytes
-print-type-size type: `NestedNonZero<u32>`: 12 bytes, alignment: 4 bytes
print-type-size field `.pre`: 1 bytes
-print-type-size padding: 3 bytes
-print-type-size field `.val`: 4 bytes, alignment: 4 bytes
+print-type-size end padding: 1 bytes
+print-type-size type: `MyOption<IndirectNonZero<u32>>`: 12 bytes, alignment: 4 bytes
+print-type-size variant `Some`: 12 bytes
+print-type-size field `.0`: 12 bytes
+print-type-size type: `EmbeddedDiscr`: 8 bytes, alignment: 4 bytes
+print-type-size variant `Record`: 7 bytes
+print-type-size field `.val`: 4 bytes
+print-type-size field `.post`: 2 bytes
+print-type-size field `.pre`: 1 bytes
+print-type-size end padding: 1 bytes
+print-type-size type: `NestedNonZero<u32>`: 8 bytes, alignment: 4 bytes
+print-type-size field `.val`: 4 bytes
print-type-size field `.post`: 2 bytes
-print-type-size end padding: 2 bytes
+print-type-size field `.pre`: 1 bytes
+print-type-size end padding: 1 bytes
print-type-size type: `MyOption<core::nonzero::NonZero<u32>>`: 4 bytes, alignment: 4 bytes
print-type-size variant `Some`: 4 bytes
print-type-size field `.0`: 4 bytes
-print-type-size type: `Padded`: 16 bytes, alignment: 4 bytes
+print-type-size type: `Padded`: 12 bytes, alignment: 4 bytes
+print-type-size field `.g`: 4 bytes
+print-type-size field `.h`: 2 bytes
print-type-size field `.a`: 1 bytes
print-type-size field `.b`: 1 bytes
-print-type-size padding: 2 bytes
-print-type-size field `.g`: 4 bytes, alignment: 4 bytes
print-type-size field `.c`: 1 bytes
-print-type-size padding: 1 bytes
-print-type-size field `.h`: 2 bytes, alignment: 2 bytes
print-type-size field `.d`: 1 bytes
-print-type-size end padding: 3 bytes
+print-type-size end padding: 2 bytes
print-type-size type: `Packed`: 10 bytes, alignment: 1 bytes
print-type-size field `.a`: 1 bytes
print-type-size field `.b`: 1 bytes
print-type-size type: `E1`: 12 bytes, alignment: 4 bytes
-print-type-size discriminant: 4 bytes
-print-type-size variant `A`: 5 bytes
-print-type-size field `.0`: 4 bytes
+print-type-size discriminant: 1 bytes
+print-type-size variant `A`: 7 bytes
print-type-size field `.1`: 1 bytes
-print-type-size variant `B`: 8 bytes
-print-type-size field `.0`: 8 bytes
+print-type-size padding: 2 bytes
+print-type-size field `.0`: 4 bytes, alignment: 4 bytes
+print-type-size variant `B`: 11 bytes
+print-type-size padding: 3 bytes
+print-type-size field `.0`: 8 bytes, alignment: 4 bytes
print-type-size type: `E2`: 12 bytes, alignment: 4 bytes
print-type-size discriminant: 1 bytes
print-type-size variant `A`: 7 bytes
print-type-size padding: 3 bytes
print-type-size field `.0`: 8 bytes, alignment: 4 bytes
print-type-size type: `S`: 8 bytes, alignment: 4 bytes
+print-type-size field `.g`: 4 bytes
print-type-size field `.a`: 1 bytes
print-type-size field `.b`: 1 bytes
-print-type-size padding: 2 bytes
-print-type-size field `.g`: 4 bytes, alignment: 4 bytes
+print-type-size end padding: 2 bytes
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only
+
+fn main () {
+ (1, (2, 3)).1.1;
+}
--- /dev/null
+error: unexpected token: `1.1`
+ --> $DIR/tuple-float-index.rs:14:17
+ |
+14 | (1, (2, 3)).1.1;
+ | ^^^ unexpected token
+ |
+help: try parenthesizing the first index
+ | ((1, (2, 3)).1).1;
+
+error: aborting due to previous error
+
extern crate toml;
extern crate rustc_serialize;
-use std::collections::{BTreeMap, HashMap};
+use std::collections::BTreeMap;
use std::env;
use std::fs::File;
use std::io::{self, Read, Write};
struct Manifest {
manifest_version: String,
date: String,
- pkg: HashMap<String, Package>,
+ pkg: BTreeMap<String, Package>,
}
#[derive(RustcEncodable)]
struct Package {
version: String,
- target: HashMap<String, Target>,
+ target: BTreeMap<String, Target>,
}
#[derive(RustcEncodable)]
struct Builder {
rust_release: String,
cargo_release: String,
+ rls_release: String,
input: PathBuf,
output: PathBuf,
gpg_passphrase: String,
- digests: HashMap<String, String>,
+ digests: BTreeMap<String, String>,
s3_address: String,
date: String,
rust_version: String,
cargo_version: String,
+ rls_version: String,
}
fn main() {
let date = args.next().unwrap();
let rust_release = args.next().unwrap();
let cargo_release = args.next().unwrap();
+ let rls_release = args.next().unwrap();
let s3_address = args.next().unwrap();
let mut passphrase = String::new();
t!(io::stdin().read_to_string(&mut passphrase));
Builder {
rust_release: rust_release,
cargo_release: cargo_release,
+ rls_release: rls_release,
input: input,
output: output,
gpg_passphrase: passphrase,
- digests: HashMap::new(),
+ digests: BTreeMap::new(),
s3_address: s3_address,
date: date,
rust_version: String::new(),
cargo_version: String::new(),
+ rls_version: String::new(),
}.build();
}
fn build(&mut self) {
self.rust_version = self.version("rust", "x86_64-unknown-linux-gnu");
self.cargo_version = self.version("cargo", "x86_64-unknown-linux-gnu");
+ self.rls_version = self.version("rls", "x86_64-unknown-linux-gnu");
self.digest_and_sign();
let Manifest { manifest_version, date, pkg } = self.build_manifest();
let mut manifest = Manifest {
manifest_version: "2".to_string(),
date: self.date.to_string(),
- pkg: HashMap::new(),
+ pkg: BTreeMap::new(),
};
self.package("rustc", &mut manifest.pkg, HOSTS);
self.package("rust-std", &mut manifest.pkg, TARGETS);
self.package("rust-docs", &mut manifest.pkg, TARGETS);
self.package("rust-src", &mut manifest.pkg, &["*"]);
-
- if self.rust_release == "nightly" {
- self.package("rust-analysis", &mut manifest.pkg, TARGETS);
- }
+ self.package("rls", &mut manifest.pkg, HOSTS);
+ self.package("rust-analysis", &mut manifest.pkg, TARGETS);
let mut pkg = Package {
version: self.cached_version("rust").to_string(),
- target: HashMap::new(),
+ target: BTreeMap::new(),
};
for host in HOSTS {
let filename = self.filename("rust", host);
});
}
+ extensions.push(Component {
+ pkg: "rls".to_string(),
+ target: host.to_string(),
+ });
+ extensions.push(Component {
+ pkg: "rust-analysis".to_string(),
+ target: host.to_string(),
+ });
for target in TARGETS {
if target != host {
extensions.push(Component {
target: target.to_string(),
});
}
- if self.rust_release == "nightly" {
- extensions.push(Component {
- pkg: "rust-analysis".to_string(),
- target: target.to_string(),
- });
- }
}
extensions.push(Component {
pkg: "rust-src".to_string(),
fn package(&mut self,
pkgname: &str,
- dst: &mut HashMap<String, Package>,
+ dst: &mut BTreeMap<String, Package>,
targets: &[&str]) {
let targets = targets.iter().map(|name| {
let filename = self.filename(pkgname, name);
format!("rust-src-{}.tar.gz", self.rust_release)
} else if component == "cargo" {
format!("cargo-{}-{}.tar.gz", self.cargo_release, target)
+ } else if component == "rls" {
+ format!("rls-{}-{}.tar.gz", self.rls_release, target)
} else {
format!("{}-{}-{}.tar.gz", component, self.rust_release, target)
}
fn cached_version(&self, component: &str) -> &str {
if component == "cargo" {
&self.cargo_version
+ } else if component == "rls" {
+ &self.rls_version
} else {
&self.rust_version
}
let primary_spans: Vec<_> = spans_in_this_file.iter()
.cloned()
.filter(|span| span.is_primary)
+ .take(1) // sometimes we have more than one showing up in the json; pick first
.collect();
let primary_spans = if primary_spans.is_empty() {
// subdiagnostics often don't have a span of their own;
}
fn dump_output(&self, out: &str, err: &str) {
- self.dump_output_file(out, "out");
- self.dump_output_file(err, "err");
+ let revision = if let Some(r) = self.revision {
+ format!("{}.", r)
+ } else {
+ String::new()
+ };
+
+ self.dump_output_file(out, &format!("{}out", revision));
+ self.dump_output_file(err, &format!("{}err", revision));
self.maybe_dump_to_stdout(out, err);
}
if file.ends_with("btree_set/struct.BTreeSet.html") ||
file.ends_with("collections/struct.BTreeSet.html") ||
file.ends_with("collections/btree_map/struct.BTreeMap.html") ||
- file.ends_with("collections/hash_map/struct.HashMap.html") {
+ file.ends_with("collections/hash_map/struct.HashMap.html") ||
+ file.ends_with("collections/hash_set/struct.HashSet.html") {
return None;
}
"src/llvm",
"src/libbacktrace",
"src/compiler-rt",
- "src/rt/hoedown",
"src/rustllvm",
"src/rust-installer",
"src/liblibc",
- "src/tools/cargo",
"src/vendor",
+ "src/rt/hoedown",
];
skip.iter().any(|p| path.ends_with(p))
}