-language: rust
+language: minimal
sudo: required
dist: trusty
services:
- env: IMAGE=x86_64-gnu-cargotest
- env: IMAGE=x86_64-gnu-debug
- env: IMAGE=x86_64-gnu-nopt
- - env: IMAGE=x86_64-gnu-rustbuild
+ - env: IMAGE=x86_64-gnu-make
- env: IMAGE=x86_64-gnu-llvm-3.7 ALLOW_PR=1 RUST_BACKTRACE=1
- env: IMAGE=x86_64-musl
install: brew install ccache
- env: >
RUST_CHECK_TARGET=check
- RUST_CONFIGURE_ARGS=--target=x86_64-apple-darwin --enable-rustbuild
+ RUST_CONFIGURE_ARGS=--target=x86_64-apple-darwin --disable-rustbuild
SRC=.
os: osx
install: brew install ccache
install: brew install ccache
script:
- - if [ -z "$ALLOW_PR" ] && [ "$TRAVIS_BRANCH" != "auto" ]; then
- echo skipping, not a full build;
- elif [ -z "$ENABLE_AUTO" ] then
- echo skipping, not quite ready yet
- elif [ "$TRAVIS_OS_NAME" = "osx" ]; then
- git submodule update --init;
- src/ci/run.sh;
- else
- git submodule update --init;
- src/ci/docker/run.sh $IMAGE;
- fi
+ - >
+ if [ "$ALLOW_PR" = "" ] && [ "$TRAVIS_BRANCH" != "auto" ]; then
+ echo skipping, not a full build;
+ elif [ "$TRAVIS_OS_NAME" = "osx" ]; then
+ git submodule update --init;
+ src/ci/run.sh;
+ else
+ git submodule update --init;
+ src/ci/docker/run.sh $IMAGE;
+ fi
# Save tagged docker images we created and load them if they're available
before_cache:
It's your best friend when working on Rust, allowing you to compile & test
your contributions before submission.
-All the configuration for the build system lives in [the `mk` directory][mkdir]
-in the project root. It can be hard to follow in places, as it uses some
-advanced Make features which make for some challenging reading. If you have
-questions on the build system internals, try asking in
-[`#rust-internals`][pound-rust-internals].
+The build system lives in [the `src/bootstrap` directory][bootstrap] in the
+project root. Our build system is itself written in Rust and is based on Cargo
+to actually build all the compiler's crates. If you have questions on the build
+system internals, try asking in [`#rust-internals`][pound-rust-internals].
-[mkdir]: https://github.com/rust-lang/rust/tree/master/mk/
+[bootstrap]: https://github.com/rust-lang/rust/tree/master/src/bootstrap/
+
+> **Note**: the build system was recently rewritten from a jungle of makefiles
+> to the current incarnation you'll see in `src/bootstrap`. If you experience
+> bugs you can temporarily revert back to the makefiles with
+> `--disable-rustbuild` passed to `./configure`.
### Configuration
To see a full list of options, run `./configure --help`.
-### Useful Targets
-
-Some common make targets are:
-
-- `make tips` - show useful targets, variables and other tips for working with
- the build system.
-- `make rustc-stage1` - build up to (and including) the first stage. For most
- cases we don't need to build the stage2 compiler, so we can save time by not
- building it. The stage1 compiler is a fully functioning compiler and
- (probably) will be enough to determine if your change works as expected.
-- `make $host/stage1/bin/rustc` - Where $host is a target triple like x86_64-unknown-linux-gnu.
- This will build just rustc, without libstd. This is the fastest way to recompile after
- you changed only rustc source code. Note however that the resulting rustc binary
- won't have a stdlib to link against by default. You can build libstd once with
- `make rustc-stage1`, rustc will pick it up afterwards. libstd is only guaranteed to
- work if recompiled, so if there are any issues recompile it.
-- `make check` - build the full compiler & run all tests (takes a while). This
+### Building
+
+Although the `./configure` script will generate a `Makefile`, this is actually
+just a thin veneer over the actual build system driver, `x.py`. This file, at
+the root of the repository, is used to build, test, and document various parts
+of the compiler. You can execute it as:
+
+```sh
+python x.py build
+```
+
+On some systems you can also use the shorter version:
+
+```sh
+./x.py build
+```
+
+To learn more about the driver and top-level targets, you can execute:
+
+```sh
+python x.py --help
+```
+
+The general format for the driver script is:
+
+```sh
+python x.py <command> [<directory>]
+```
+
+Some example commands are `build`, `test`, and `doc`. These will build, test,
+and document the specified directory. The second argument, `<directory>`, is
+optional and defaults to working over the entire compiler. If specified,
+however, only that specific directory will be built. For example:
+
+```sh
+# build the entire compiler
+python x.py build
+
+# build all documentation
+python x.py doc
+
+# run all test suites
+python x.py test
+
+# build only the standard library
+python x.py build src/libstd
+
+# test only one particular test suite
+python x.py test src/test/rustdoc
+
+# build only the stage0 libcore library
+python x.py build src/libcore --stage 0
+```
+
+You can explore the build system through the various `--help` pages for each
+subcommand. For example to learn more about a command you can run:
+
+```
+python x.py build --help
+```
+
+To learn about all possible rules you can execute, run:
+
+```
+python x.py build --help --verbose
+```
+
+### Useful commands
+
+Some common invocations of `x.py` are:
+
+- `x.py build --help` - show the help message and explain the subcommand
+- `x.py build src/libtest --stage 1` - build up to (and including) the first
+ stage. For most cases we don't need to build the stage2 compiler, so we can
+ save time by not building it. The stage1 compiler is a fully functioning
+ compiler and (probably) will be enough to determine if your change works as
+ expected.
+- `x.py build src/rustc --stage 1` - This will build just rustc, without libstd.
+ This is the fastest way to recompile after you changed only rustc source code.
+ Note however that the resulting rustc binary won't have a stdlib to link
+ against by default. You can build libstd once with `x.py build src/libstd`,
+  but it is only guaranteed to work if recompiled, so if there are any issues
+ recompile it.
+- `x.py test` - build the full compiler & run all tests (takes a while). This
is what gets run by the continuous integration system against your pull
request. You should run this before submitting to make sure your tests pass
& everything builds in the correct manner.
-- `make check-stage1-std NO_REBUILD=1` - test the standard library without
- rebuilding the entire compiler
-- `make check TESTNAME=<substring-of-test-name>` - Run a matching set of tests.
+- `x.py test src/libstd --stage 1` - test the standard library without
+ recompiling stage 2.
+- `x.py test src/test/run-pass --filter TESTNAME` - Run a matching set of tests.
- `TESTNAME` should be a substring of the tests to match against e.g. it could
be the fully qualified test name, or just a part of it.
`TESTNAME=collections::hash::map::test_map::test_capacity_not_less_than_len`
or `TESTNAME=test_capacity_not_less_than_len`.
-- `make check-stage1-rpass TESTNAME=<substring-of-test-name>` - Run a single
- rpass test with the stage1 compiler (this will be quicker than running the
- command above as we only build the stage1 compiler, not the entire thing).
- You can also leave off the `-rpass` to run all stage1 test types.
-- `make check-stage1-coretest` - Run stage1 tests in `libcore`.
-- `make tidy` - Check that the source code is in compliance with Rust's style
- guidelines. There is no official document describing Rust's full guidelines
- as of yet, but basic rules like 4 spaces for indentation and no more than 99
- characters in a single line should be kept in mind when writing code.
+- `x.py test src/test/run-pass --stage 1 --filter <substring-of-test-name>` -
+ Run a single rpass test with the stage1 compiler (this will be quicker than
+ running the command above as we only build the stage1 compiler, not the entire
+ thing). You can also leave off the directory argument to run all stage1 test
+ types.
+- `x.py test src/libcore --stage 1` - Run stage1 tests in `libcore`.
+- `x.py test src/tools/tidy` - Check that the source code is in compliance with
+ Rust's style guidelines. There is no official document describing Rust's full
+ guidelines as of yet, but basic rules like 4 spaces for indentation and no
+ more than 99 characters in a single line should be kept in mind when writing
+ code.
## Pull Requests
once before running these will work, but that’s only one full build rather than
one each time.
- $ make -j8 rustc-stage1 && make check-stage1
+ $ python x.py test --stage 1
is one such example, which builds just `rustc`, and then runs the tests. If
you’re adding something to the standard library, try
- $ make -j8 check-stage1-std NO_REBUILD=1
-
-This will not rebuild the compiler, but will run the tests.
+ $ python x.py test src/libstd --stage 1
Please make sure your pull request is in compliance with Rust's style
guidelines by running
- $ make tidy
+ $ python x.py test src/tools/tidy
Make this check before every pull request (and every new commit in a pull
request) ; you can add [git hooks](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks)
```sh
$ ./configure
- $ make && make install
+ $ make && sudo make install
```
- > ***Note:*** You may need to use `sudo make install` if you do not
- > normally have permission to modify the destination directory. The
- > install locations can be adjusted by passing a `--prefix` argument
- > to `configure`. Various other options are also supported – pass
+ > ***Note:*** Install locations can be adjusted by passing a `--prefix`
+ > argument to `configure`. Various other options are also supported – pass
> `--help` for more information on them.
- When complete, `make install` will place several programs into
+ When complete, `sudo make install` will place several programs into
`/usr/local/bin`: `rustc`, the Rust compiler, and `rustdoc`, the
API-documentation tool. This install does not include [Cargo],
Rust's package manager, which you may also want to build.
(or later) so `rustc` can use its linker. Make sure to check the “C++ tools”
option.
-With these dependencies installed, the build takes two steps:
+With these dependencies installed, you can build the compiler in a `cmd.exe`
+shell with:
```sh
-$ ./configure
-$ make && make install
+> python x.py build
```
-#### MSVC with rustbuild
-
-The old build system, based on makefiles, is currently being rewritten into a
-Rust-based build system called rustbuild. This can be used to bootstrap the
-compiler on MSVC without needing to install MSYS or MinGW. All you need are
-[Python 2](https://www.python.org/downloads/),
-[CMake](https://cmake.org/download/), and
-[Git](https://git-scm.com/downloads) in your PATH (make sure you do not use the
-ones from MSYS if you have it installed). You'll also need Visual Studio 2013 or
-newer with the C++ tools. Then all you need to do is to kick off rustbuild.
+If you're running inside of an msys shell, however, you can run:
-```
-python x.py build
+```sh
+$ ./configure --build=x86_64-pc-windows-msvc
+$ make && make install
```
-Currently rustbuild only works with some known versions of Visual Studio. If you
-have a more recent version installed that a part of rustbuild doesn't understand
+Currently building Rust only works with some known versions of Visual Studio. If
+you have a more recent version installed that the build system doesn't understand
then you may need to force rustbuild to use an older version. This can be done
by manually calling the appropriate vcvars file before running the bootstrap.
$ make docs
```
-Building the documentation requires building the compiler, so the above
-details will apply. Once you have the compiler built, you can
-
-```sh
-$ make docs NO_REBUILD=1
-```
-
-To make sure you don’t re-build the compiler because you made a change
-to some documentation.
-
The generated documentation will appear in a top-level `doc` directory,
created by the `make` rule.
matrix:
# 32/64 bit MSVC
- MSYS_BITS: 64
- TARGET: x86_64-pc-windows-msvc
- CHECK: check
- CONFIGURE_ARGS: --enable-llvm-assertions --enable-debug-assertions
+ RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-msvc
+ RUST_CHECK_TARGET: check
- MSYS_BITS: 32
- TARGET: i686-pc-windows-msvc
- CHECK: check
- CONFIGURE_ARGS: --enable-llvm-assertions --enable-debug-assertions
+ RUST_CONFIGURE_ARGS: --build=i686-pc-windows-msvc
+ RUST_CHECK_TARGET: check
- # MSVC rustbuild
+ # MSVC makefiles
- MSYS_BITS: 64
- CONFIGURE_ARGS: --enable-rustbuild --enable-llvm-assertions --enable-debug-assertions
- TARGET: x86_64-pc-windows-msvc
- CHECK: check
+ RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-msvc --disable-rustbuild
+ RUST_CHECK_TARGET: check
# MSVC cargotest
- MSYS_BITS: 64
- CONFIGURE_ARGS: --enable-rustbuild --enable-llvm-assertions --enable-debug-assertions
- TARGET: x86_64-pc-windows-msvc
- CHECK: check-cargotest
+ NO_VENDOR: 1
+ RUST_CHECK_TARGET: check-cargotest
+ RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-msvc
# 32/64-bit MinGW builds.
#
# *not* use debug assertions and llvm assertions. This is because they take
# too long on appveyor and this is tested by rustbuild below.
- MSYS_BITS: 32
- TARGET: i686-pc-windows-gnu
- CHECK: check
+ RUST_CONFIGURE_ARGS: --build=i686-pc-windows-gnu
+ RUST_CHECK_TARGET: check
MINGW_URL: https://s3.amazonaws.com/rust-lang-ci
MINGW_ARCHIVE: i686-4.9.2-release-win32-dwarf-rt_v4-rev4.7z
MINGW_DIR: mingw32
- MSYS_BITS: 32
- CONFIGURE_ARGS: --enable-rustbuild --enable-llvm-assertions --enable-debug-assertions
- TARGET: i686-pc-windows-gnu
- CHECK: check
+ RUST_CONFIGURE_ARGS: --build=i686-pc-windows-gnu --disable-rustbuild
+ RUST_CHECK_TARGET: check
MINGW_URL: https://s3.amazonaws.com/rust-lang-ci
MINGW_ARCHIVE: i686-4.9.2-release-win32-dwarf-rt_v4-rev4.7z
MINGW_DIR: mingw32
- MSYS_BITS: 64
- CONFIGURE_ARGS: --enable-llvm-assertions --enable-debug-assertions
- TARGET: x86_64-pc-windows-gnu
- CHECK: check
+ RUST_CHECK_TARGET: check
+ RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-gnu
MINGW_URL: https://s3.amazonaws.com/rust-lang-ci
MINGW_ARCHIVE: x86_64-4.9.2-release-win32-seh-rt_v4-rev4.7z
MINGW_DIR: mingw64
- if NOT defined MINGW_URL set PATH=C:\msys64\mingw%MSYS_BITS%\bin;C:\msys64\usr\bin;%PATH%
test_script:
- - sh ./configure
- %CONFIGURE_ARGS%
- --build=%TARGET%
- - bash -c "make -j$(nproc)"
- - bash -c "make %CHECK% -j$(nproc)"
+ - git submodule update --init
+ - set SRC=.
+ - set NO_CCACHE=1
+ - sh src/ci/run.sh
cache:
- - build/%TARGET%/llvm -> src/rustllvm/llvm-auto-clean-trigger
- - "%TARGET%/llvm -> src/rustllvm/llvm-auto-clean-trigger"
+ - "build/i686-pc-windows-gnu/llvm -> src/rustllvm/llvm-auto-clean-trigger"
+ - "build/x86_64-pc-windows-gnu/llvm -> src/rustllvm/llvm-auto-clean-trigger"
+ - "build/i686-pc-windows-msvc/llvm -> src/rustllvm/llvm-auto-clean-trigger"
+ - "build/x86_64-pc-windows-msvc/llvm -> src/rustllvm/llvm-auto-clean-trigger"
+ - "i686-pc-windows-gnu/llvm -> src/rustllvm/llvm-auto-clean-trigger"
+ - "x86_64-pc-windows-gnu/llvm -> src/rustllvm/llvm-auto-clean-trigger"
+ - "i686-pc-windows-msvc/llvm -> src/rustllvm/llvm-auto-clean-trigger"
+ - "x86_64-pc-windows-msvc/llvm -> src/rustllvm/llvm-auto-clean-trigger"
branches:
only:
opt dist-host-only 0 "only install bins for the host architecture"
opt inject-std-version 1 "inject the current compiler version of libstd into programs"
opt llvm-version-check 1 "check if the LLVM version is supported, build anyway"
-opt rustbuild 0 "use the rust and cargo based build system"
+opt rustbuild 1 "use the rust and cargo based build system"
opt codegen-tests 1 "run the src/test/codegen tests"
opt option-checking 1 "complain about unrecognized options in this configure script"
opt ninja 0 "build LLVM using the Ninja generator (for MSVC, requires building in the correct environment)"
valopt aarch64-linux-android-ndk "" "aarch64-linux-android NDK standalone path"
valopt nacl-cross-path "" "NaCl SDK path (Pepper Canary is recommended). Must be absolute!"
valopt musl-root "/usr/local" "MUSL root installation directory (deprecated)"
-valopt musl-root-x86_64 "/usr/local" "x86_64-unknown-linux-musl install directory"
-valopt musl-root-i686 "/usr/local" "i686-unknown-linux-musl install directory"
-valopt musl-root-arm "/usr/local" "arm-unknown-linux-musleabi install directory"
-valopt musl-root-armhf "/usr/local" "arm-unknown-linux-musleabihf install directory"
-valopt musl-root-armv7 "/usr/local" "armv7-unknown-linux-musleabihf install directory"
+valopt musl-root-x86_64 "" "x86_64-unknown-linux-musl install directory"
+valopt musl-root-i686 "" "i686-unknown-linux-musl install directory"
+valopt musl-root-arm "" "arm-unknown-linux-musleabi install directory"
+valopt musl-root-armhf "" "arm-unknown-linux-musleabihf install directory"
+valopt musl-root-armv7 "" "armv7-unknown-linux-musleabihf install directory"
valopt extra-filename "" "Additional data that is hashed and passed to the -C extra-filename flag"
if [ -e ${CFG_SRC_DIR}.git ]
fi
fi
-if [ -z "$CFG_ENABLE_RUSTBUILD" ]; then
+if [ -n "$CFG_DISABLE_RUSTBUILD" ]; then
step_msg "making directories"
step_msg "configuring submodules"
# Have to be in the top of src directory for this
-if [ -z "$CFG_DISABLE_MANAGE_SUBMODULES" ] && [ -z "$CFG_ENABLE_RUSTBUILD" ]
+if [ -z "$CFG_DISABLE_MANAGE_SUBMODULES" ] && [ -n "$CFG_DISABLE_RUSTBUILD" ]
then
cd ${CFG_SRC_DIR}
;;
esac
- if [ -n "$CFG_ENABLE_RUSTBUILD" ]
+ if [ -z "$CFG_DISABLE_RUSTBUILD" ]
then
msg "not configuring LLVM, rustbuild in use"
do_reconfigure=0
putvar $CFG_LLVM_INST_DIR
done
-if [ -n "$CFG_ENABLE_RUSTBUILD" ]
+if [ -z "$CFG_DISABLE_RUSTBUILD" ]
then
INPUT_MAKEFILE=src/bootstrap/mk/Makefile.in
else
step_msg "complete"
fi
-msg "run \`make help\`"
+if [ -z "$CFG_DISABLE_RUSTBUILD" ]; then
+ msg "NOTE you have now configured rust to use a rewritten build system"
+ msg " called rustbuild, and as a result this may have bugs that "
+ msg " you did not see before. If you experience any issues you can"
+ msg " go back to the old build system with --disable-rustbuild and"
+ msg " please feel free to report any bugs!"
+ msg ""
+ msg "run \`python x.py --help\`"
+else
+ warn "the makefile-based build system is deprecated in favor of rustbuild"
+ msg ""
+ msg "It is recommended you avoid passing --disable-rustbuild to get your"
+ msg "build working as the makefiles will be deleted on 2017-02-02. If you"
+ msg "encounter bugs with rustbuild please file issues against rust-lang/rust"
+ msg ""
+ msg "run \`make help\`"
+fi
+
msg
* `doc` - a command for building documentation. Like above can take arguments
for what to document.
-If you're more used to `./configure` and `make`, however, then you can also
-configure the build system to use rustbuild instead of the old makefiles:
-
-```
-./configure --enable-rustbuild
-make
-```
-
-Afterwards the `Makefile` which is generated will have a few commands like
-`make check`, `make tidy`, etc.
-
## Configuring rustbuild
There are currently two primary methods for configuring the rustbuild build
can also be passed as `--config path/to/config.toml` if the build system is
being invoked manually (via the python script).
+Finally, rustbuild makes use of the [gcc-rs crate] which has [its own
+method][env-vars] of configuring C compilers and C flags via environment
+variables.
+
+[gcc-rs crate]: https://github.com/alexcrichton/gcc-rs
+[env-vars]: https://github.com/alexcrichton/gcc-rs#external-configuration-via-environment-variables
+
## Build stages
The rustbuild build system goes through a few phases to actually build the
you up and running. Some general areas that you may be interested in modifying
are:
-* Adding a new build tool? Take a look at `build/step.rs` for examples of other
- tools, as well as `build/mod.rs`.
+* Adding a new build tool? Take a look at `bootstrap/step.rs` for examples of
+ other tools.
* Adding a new compiler crate? Look no further! Adding crates can be done by
adding a new directory with `Cargo.toml` followed by configuring all
`Cargo.toml` files accordingly.
* Adding a new dependency from crates.io? We're still working on that, so hold
off on that for now.
-* Adding a new configuration option? Take a look at `build/config.rs` or perhaps
- `build/flags.rs` and then modify the build elsewhere to read that option.
-* Adding a sanity check? Take a look at `build/sanity.rs`.
+* Adding a new configuration option? Take a look at `bootstrap/config.rs` or
+ perhaps `bootstrap/flags.rs` and then modify the build elsewhere to read that
+ option.
+* Adding a sanity check? Take a look at `bootstrap/sanity.rs`.
If you have any questions feel free to reach out on `#rust-internals` on IRC or
open an issue in the bug tracker!
cmd.arg("-C").arg(format!("codegen-units={}", s));
}
+ // Emit save-analysis info.
+ if env::var("RUSTC_SAVE_ANALYSIS") == Ok("api".to_string()) {
+ cmd.arg("-Zsave-analysis-api");
+ }
+
// Dealing with rpath here is a little special, so let's go into some
// detail. First off, `-rpath` is a linker option on Unix platforms
// which adds to the runtime dynamic loader path when looking for
sha_path = sha_file.name
try:
- download(sha_path, sha_url, verbose)
+ download(sha_path, sha_url, False, verbose)
if os.path.exists(path):
if verify(path, sha_path, False):
- print("using already-download file " + path)
+ if verbose:
+            print("using already-downloaded file " + path)
return
else:
- print("ignoring already-download file " + path + " due to failed verification")
+ if verbose:
+            print("ignoring already-downloaded file " + path + " due to failed verification")
os.unlink(path)
- download(temp_path, url, verbose)
- if not verify(temp_path, sha_path, True):
+ download(temp_path, url, True, verbose)
+ if not verify(temp_path, sha_path, verbose):
raise RuntimeError("failed verification")
- print("moving {} to {}".format(temp_path, path))
+ if verbose:
+ print("moving {} to {}".format(temp_path, path))
shutil.move(temp_path, path)
finally:
- delete_if_present(sha_path)
- delete_if_present(temp_path)
+ delete_if_present(sha_path, verbose)
+ delete_if_present(temp_path, verbose)
-def delete_if_present(path):
+def delete_if_present(path, verbose):
if os.path.isfile(path):
- print("removing " + path)
+ if verbose:
+ print("removing " + path)
os.unlink(path)
-def download(path, url, verbose):
- print("downloading {} to {}".format(url, path))
+def download(path, url, probably_big, verbose):
+ if probably_big or verbose:
+ print("downloading {}".format(url))
# see http://serverfault.com/questions/301128/how-to-download
if sys.platform == 'win32':
run(["PowerShell.exe", "/nologo", "-Command",
".DownloadFile('{}', '{}')".format(url, path)],
verbose=verbose)
else:
- run(["curl", "-o", path, url], verbose=verbose)
+ if probably_big or verbose:
+ option = "-#"
+ else:
+ option = "-s"
+ run(["curl", option, "-Sf", "-o", path, url], verbose=verbose)
def verify(path, sha_path, verbose):
- print("verifying " + path)
+ if verbose:
+ print("verifying " + path)
with open(path, "rb") as f:
found = hashlib.sha256(f.read()).hexdigest()
with open(sha_path, "r") as f:
expected, _ = f.readline().split()
verified = found == expected
- if not verified and verbose:
+ if not verified:
print("invalid checksum:\n"
" found: {}\n"
" expected: {}".format(found, expected))
if self.rustc().startswith(self.bin_root()) and \
(not os.path.exists(self.rustc()) or self.rustc_out_of_date()):
+ self.print_what_it_means_to_bootstrap()
if os.path.exists(self.bin_root()):
shutil.rmtree(self.bin_root())
channel = self.stage0_rustc_channel()
if self.cargo().startswith(self.bin_root()) and \
(not os.path.exists(self.cargo()) or self.cargo_out_of_date()):
+ self.print_what_it_means_to_bootstrap()
channel = self.stage0_cargo_channel()
filename = "cargo-{}-{}.tar.gz".format(channel, self.build)
url = "https://static.rust-lang.org/cargo-dist/" + self.stage0_cargo_date()
else:
return ''
+ def print_what_it_means_to_bootstrap(self):
+ if hasattr(self, 'printed'):
+ return
+ self.printed = True
+ if os.path.exists(self.bootstrap_binary()):
+ return
+ if not '--help' in sys.argv or len(sys.argv) == 1:
+ return
+
+ print('info: the build system for Rust is written in Rust, so this')
+ print(' script is now going to download a stage0 rust compiler')
+ print(' and then compile the build system itself')
+ print('')
+ print('info: in the meantime you can read more about rustbuild at')
+ print(' src/bootstrap/README.md before the download finishes')
+
+ def bootstrap_binary(self):
+ return os.path.join(self.build_dir, "bootstrap/debug/bootstrap")
+
def build_bootstrap(self):
+ self.print_what_it_means_to_bootstrap()
build_dir = os.path.join(self.build_dir, "bootstrap")
if self.clean and os.path.exists(build_dir):
shutil.rmtree(build_dir)
rb.use_vendored_sources = '\nvendor = true' in rb.config_toml or \
'CFG_ENABLE_VENDOR' in rb.config_mk
+ if 'SUDO_USER' in os.environ:
+ if os.environ['USER'] != os.environ['SUDO_USER']:
+ rb.use_vendored_sources = True
+ print('info: looks like you are running this command under `sudo`')
+ print(' and so in order to preserve your $HOME this will now')
+ print(' use vendored sources by default. Note that if this')
+ print(' does not work you should run a normal build first')
+            print('          before running a command like `sudo make install`')
+
if rb.use_vendored_sources:
if not os.path.exists('.cargo'):
os.makedirs('.cargo')
- f = open('.cargo/config','w')
- f.write("""
- [source.crates-io]
- replace-with = 'vendored-sources'
- registry = 'https://example.com'
-
- [source.vendored-sources]
- directory = '{}/src/vendor'
- """.format(rb.rust_root))
- f.close()
+ with open('.cargo/config','w') as f:
+ f.write("""
+ [source.crates-io]
+ replace-with = 'vendored-sources'
+ registry = 'https://example.com'
+
+ [source.vendored-sources]
+ directory = '{}/src/vendor'
+ """.format(rb.rust_root))
else:
if os.path.exists('.cargo'):
shutil.rmtree('.cargo')
+
data = stage0_data(rb.rust_root)
rb._rustc_channel, rb._rustc_date = data['rustc'].split('-', 1)
rb._cargo_channel, rb._cargo_date = data['cargo'].split('-', 1)
sys.stdout.flush()
# Run the bootstrap
- args = [os.path.join(rb.build_dir, "bootstrap/debug/bootstrap")]
+ args = [rb.bootstrap_binary()]
args.extend(sys.argv[1:])
env = os.environ.copy()
env["BUILD"] = rb.build
if let Some(cc) = config.and_then(|c| c.cc.as_ref()) {
cfg.compiler(cc);
} else {
- set_compiler(&mut cfg, "gcc", target, config);
+ set_compiler(&mut cfg, "gcc", target, config, build);
}
let compiler = cfg.get_compiler();
if let Some(cxx) = config.and_then(|c| c.cxx.as_ref()) {
cfg.compiler(cxx);
} else {
- set_compiler(&mut cfg, "g++", host, config);
+ set_compiler(&mut cfg, "g++", host, config, build);
}
let compiler = cfg.get_compiler();
build.verbose(&format!("CXX_{} = {:?}", host, compiler.path()));
fn set_compiler(cfg: &mut gcc::Config,
gnu_compiler: &str,
target: &str,
- config: Option<&Target>) {
+ config: Option<&Target>,
+ build: &Build) {
match target {
// When compiling for android we may have the NDK configured in the
// config.toml in which case we look there. Otherwise the default
}
}
+ "mips-unknown-linux-musl" => {
+ cfg.compiler("mips-linux-musl-gcc");
+ }
+ "mipsel-unknown-linux-musl" => {
+ cfg.compiler("mipsel-linux-musl-gcc");
+ }
+
+ t if t.contains("musl") => {
+ if let Some(root) = build.musl_root(target) {
+ let guess = root.join("bin/musl-gcc");
+ if guess.exists() {
+ cfg.compiler(guess);
+ }
+ }
+ }
+
_ => {}
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Implementation of the various `check-*` targets of the build system.
+//! Implementation of the test-related targets of the build system.
//!
//! This file implements the various regression test suites that we execute on
//! our CI.
use build_helper::output;
use {Build, Compiler, Mode};
+use dist;
use util::{self, dylib_path, dylib_path_var};
const ADB_TEST_DIR: &'static str = "/data/tmp";
pub fn linkcheck(build: &Build, stage: u32, host: &str) {
println!("Linkcheck stage{} ({})", stage, host);
let compiler = Compiler::new(stage, host);
+
+ let _time = util::timeit();
build.run(build.tool_cmd(&compiler, "linkchecker")
.arg(build.out.join(host).join("doc")));
}
let out_dir = build.out.join("ct");
t!(fs::create_dir_all(&out_dir));
+ let _time = util::timeit();
build.run(build.tool_cmd(compiler, "cargotest")
.env("PATH", newpath)
.arg(&build.cargo)
target: &str,
mode: &str,
suite: &str) {
- println!("Check compiletest {} ({} -> {})", suite, compiler.host, target);
+ println!("Check compiletest suite={} mode={} ({} -> {})",
+ suite, mode, compiler.host, target);
let mut cmd = build.tool_cmd(compiler, "compiletest");
// compiletest currently has... a lot of arguments, so let's just pass all
// Running a C compiler on MSVC requires a few env vars to be set, to be
// sure to set them here.
+ //
+ // Note that if we encounter `PATH` we make sure to append to our own `PATH`
+ // rather than stomp over it.
if target.contains("msvc") {
for &(ref k, ref v) in build.cc[target].0.env() {
if k != "PATH" {
}
}
cmd.env("RUSTC_BOOTSTRAP", "1");
+ build.add_rust_test_threads(&mut cmd);
cmd.arg("--adb-path").arg("adb");
cmd.arg("--adb-test-dir").arg(ADB_TEST_DIR);
cmd.arg("--android-cross-path").arg("");
}
+ let _time = util::timeit();
build.run(&mut cmd);
}
// Do a breadth-first traversal of the `src/doc` directory and just run
// tests for all files that end in `*.md`
let mut stack = vec![build.src.join("src/doc")];
+ let _time = util::timeit();
while let Some(p) = stack.pop() {
if p.is_dir() {
let dir = testdir(build, compiler.host);
t!(fs::create_dir_all(&dir));
let output = dir.join("error-index.md");
+
+ let _time = util::timeit();
build.run(build.tool_cmd(compiler, "error_index_generator")
.arg("markdown")
.arg(&output)
fn markdown_test(build: &Build, compiler: &Compiler, markdown: &Path) {
let mut cmd = Command::new(build.rustdoc(compiler));
build.add_rustc_lib_path(compiler, &mut cmd);
+ build.add_rust_test_threads(&mut cmd);
cmd.arg("--test");
cmd.arg(markdown);
dylib_path.insert(0, build.sysroot_libdir(compiler, target));
cargo.env(dylib_path_var(), env::join_paths(&dylib_path).unwrap());
+ if target.contains("android") {
+ cargo.arg("--no-run");
+ } else if target.contains("emscripten") {
+ cargo.arg("--no-run");
+ }
+
+ cargo.arg("--");
+
if build.config.quiet_tests {
- cargo.arg("--");
cargo.arg("--quiet");
}
+ let _time = util::timeit();
+
if target.contains("android") {
- build.run(cargo.arg("--no-run"));
+ build.run(&mut cargo);
krate_android(build, compiler, target, mode);
} else if target.contains("emscripten") {
- build.run(cargo.arg("--no-run"));
+ build.run(&mut cargo);
krate_emscripten(build, compiler, target, mode);
} else {
cargo.args(&build.flags.cmd.test_args());
target,
compiler.host,
test_file_name);
+ let quiet = if build.config.quiet_tests { "--quiet" } else { "" };
let program = format!("(cd {dir}; \
LD_LIBRARY_PATH=./{target} ./{test} \
--logfile {log} \
+ {quiet} \
{args})",
dir = ADB_TEST_DIR,
target = target,
test = test_file_name,
log = log,
+ quiet = quiet,
args = build.flags.cmd.test_args().join(" "));
let output = output(Command::new("adb").arg("shell").arg(&program));
let test_file_name = test.to_string_lossy().into_owned();
println!("running {}", test_file_name);
let nodejs = build.config.nodejs.as_ref().expect("nodejs not configured");
- let status = Command::new(nodejs)
- .arg(&test_file_name)
- .stderr(::std::process::Stdio::inherit())
- .status();
- match status {
- Ok(status) => {
- if !status.success() {
- panic!("some tests failed");
- }
- }
- Err(e) => panic!(format!("failed to execute command: {}", e)),
- };
+ let mut cmd = Command::new(nodejs);
+ cmd.arg(&test_file_name)
+ .stderr(::std::process::Stdio::inherit());
+ if build.config.quiet_tests {
+ cmd.arg("--quiet");
+ }
+ build.run(&mut cmd);
}
}
}
}
}
+
+/// Run "distcheck", a 'make check' from a tarball
+pub fn distcheck(build: &Build) {
+ if build.config.build != "x86_64-unknown-linux-gnu" {
+ return
+ }
+ if !build.config.host.iter().any(|s| s == "x86_64-unknown-linux-gnu") {
+ return
+ }
+ if !build.config.target.iter().any(|s| s == "x86_64-unknown-linux-gnu") {
+ return
+ }
+
+ let dir = build.out.join("tmp").join("distcheck");
+ let _ = fs::remove_dir_all(&dir);
+ t!(fs::create_dir_all(&dir));
+
+ let mut cmd = Command::new("tar");
+ cmd.arg("-xzf")
+ .arg(dist::rust_src_location(build))
+ .arg("--strip-components=1")
+ .current_dir(&dir);
+ build.run(&mut cmd);
+ build.run(Command::new("./configure")
+ .current_dir(&dir));
+ build.run(Command::new("make")
+ .arg("check")
+ .current_dir(&dir));
+}
if !path.exists() {
return
}
+ if path.is_file() {
+ return do_op(path, "remove file", |p| fs::remove_file(p));
+ }
for file in t!(fs::read_dir(path)) {
let file = t!(file).path();
use std::path::{PathBuf, Path};
use std::process::Command;
-use {Build, Compiler};
+use {Build, Compiler, Mode};
use util::{cp_r, libdir, is_dylib, cp_filtered, copy};
pub fn package_vers(build: &Build) -> &str {
t!(fs::remove_dir_all(&image));
}
+pub fn rust_src_location(build: &Build) -> PathBuf {
+ let plain_name = format!("rustc-{}-src", package_vers(build));
+ distdir(build).join(&format!("{}.tar.gz", plain_name))
+}
+
+/// Creates a tarball of save-analysis metadata, if available.
+pub fn analysis(build: &Build, compiler: &Compiler, target: &str) {
+ println!("Dist analysis");
+
+ if build.config.channel != "nightly" {
+ println!("Skipping dist-analysis - not on nightly channel");
+ return;
+ }
+ if compiler.stage != 2 {
+ return
+ }
+
+ let name = format!("rust-analysis-{}", package_vers(build));
+ let image = tmpdir(build).join(format!("{}-{}-image", name, target));
+
+ let src = build.stage_out(compiler, Mode::Libstd).join(target).join("release").join("deps");
+
+ let image_src = src.join("save-analysis");
+ let dst = image.join("lib/rustlib").join(target).join("analysis");
+ t!(fs::create_dir_all(&dst));
+ cp_r(&image_src, &dst);
+
+ let mut cmd = Command::new("sh");
+ cmd.arg(sanitize_sh(&build.src.join("src/rust-installer/gen-installer.sh")))
+ .arg("--product-name=Rust")
+ .arg("--rel-manifest-dir=rustlib")
+ .arg("--success-message=save-analysis-saved.")
+ .arg(format!("--image-dir={}", sanitize_sh(&image)))
+ .arg(format!("--work-dir={}", sanitize_sh(&tmpdir(build))))
+ .arg(format!("--output-dir={}", sanitize_sh(&distdir(build))))
+ .arg(format!("--package-name={}-{}", name, target))
+ .arg(format!("--component-name=rust-analysis-{}", target))
+ .arg("--legacy-manifest-dirs=rustlib,cargo");
+ build.run(&mut cmd);
+ t!(fs::remove_dir_all(&image));
+
+ // Create a tarball of the save-analysis data
+ let mut cmd = Command::new("tar");
+ cmd.arg("-czf").arg(sanitize_sh(&distdir(build).join(&format!("{}.tar.gz", name))))
+ .arg("analysis")
+ .current_dir(&src);
+ build.run(&mut cmd);
+}
+
/// Creates the `rust-src` installer component and the plain source tarball
pub fn rust_src(build: &Build) {
println!("Dist src");
// Create plain source tarball
let mut cmd = Command::new("tar");
- cmd.arg("-czf").arg(sanitize_sh(&distdir(build).join(&format!("{}.tar.gz", plain_name))))
+ cmd.arg("-czf").arg(sanitize_sh(&rust_src_location(build)))
.arg(&plain_name)
.current_dir(&dst);
build.run(&mut cmd);
install: m.opt_present("install"),
}
}
+ "--help" => usage(0, &opts),
cmd => {
println!("unknown command: {}", cmd);
usage(1, &opts);
//! This module, and its descendants, are the implementation of the Rust build
//! system. Most of this build system is backed by Cargo but the outer layer
//! here serves as the ability to orchestrate calling Cargo, sequencing Cargo
-//! builds, building artifacts like LLVM, etc.
+//! builds, building artifacts like LLVM, etc. The goals of rustbuild are:
//!
-//! More documentation can be found in each respective module below.
+//! * To be an easily understandable, easily extensible, and maintainable build
+//! system.
+//! * Leverage standard tools in the Rust ecosystem to build the compiler, aka
+//! crates.io and Cargo.
+//! * A standard interface to build across all platforms, including MSVC
+//!
+//! ## Architecture
+//!
+//! Although this build system defers most of the complicated logic to Cargo
+//! itself, it still needs to maintain a list of targets and dependencies which
+//! it can itself perform. Rustbuild is made up of a list of rules with
+//! dependencies amongst them (created in the `step` module) and then knows how
+//! to execute each in sequence. Each time rustbuild is invoked, it will simply
+//! iterate through this list of steps and execute each serially in turn. For
+//! each step rustbuild relies on the step internally being incremental and
+//! parallel. Note, though, that the `-j` parameter to rustbuild gets forwarded
+//! to appropriate test harnesses and such.
+//!
+//! Most of the "meaty" steps that matter are backed by Cargo, which does indeed
+//! have its own parallelism and incremental management. Later steps, like
+//! tests, aren't incremental and simply run the entire suite currently.
+//!
+//! When you execute `x.py build`, the steps which are executed are:
+//!
+//! * First, the python script is run. This will automatically download the
+//! stage0 rustc and cargo according to `src/stage0.txt`, or using the cached
+//! versions if they're available. These are then used to compile rustbuild
+//! itself (using Cargo). Finally, control is then transferred to rustbuild.
+//!
+//! * Rustbuild takes over, performs sanity checks, probes the environment,
+//! reads configuration, builds up a list of steps, and then starts executing
+//! them.
+//!
+//! * The stage0 libstd is compiled
+//! * The stage0 libtest is compiled
+//! * The stage0 librustc is compiled
+//! * The stage1 compiler is assembled
+//! * The stage1 libstd, libtest, librustc are compiled
+//! * The stage2 compiler is assembled
+//! * The stage2 libstd, libtest, librustc are compiled
+//!
+//! Each step is driven by a separate Cargo project and rustbuild orchestrates
+//! copying files between steps and otherwise preparing for Cargo to run.
+//!
+//! ## Further information
+//!
+//! More documentation can be found in each respective module below, and you can
+//! also check out the `src/bootstrap/README.md` file for more information.
extern crate build_helper;
extern crate cmake;
use std::collections::HashMap;
use std::env;
+use std::ffi::OsString;
use std::fs::{self, File};
use std::path::{Component, PathBuf, Path};
use std::process::Command;
cc: HashMap<String, (gcc::Tool, Option<PathBuf>)>,
cxx: HashMap<String, gcc::Tool>,
crates: HashMap<String, Crate>,
+ is_sudo: bool,
}
#[derive(Debug)]
};
let local_rebuild = config.local_rebuild;
+ let is_sudo = match env::var_os("SUDO_USER") {
+ Some(sudo_user) => {
+ match env::var_os("USER") {
+ Some(user) => user != sudo_user,
+ None => false,
+ }
+ }
+ None => false,
+ };
+
Build {
flags: flags,
config: config,
crates: HashMap::new(),
lldb_version: None,
lldb_python_dir: None,
+ is_sudo: is_sudo,
}
}
// how the actual compiler itself is called.
//
// These variables are primarily all read by
- // src/bootstrap/{rustc,rustdoc.rs}
+ // src/bootstrap/bin/{rustc.rs,rustdoc.rs}
cargo.env("RUSTC", self.out.join("bootstrap/debug/rustc"))
.env("RUSTC_REAL", self.compiler_path(compiler))
.env("RUSTC_STAGE", stage.to_string())
// Enable usage of unstable features
cargo.env("RUSTC_BOOTSTRAP", "1");
+ self.add_rust_test_threads(&mut cargo);
// Specify some various options for build scripts used throughout
// the build.
.env(format!("CFLAGS_{}", target), self.cflags(target).join(" "));
}
+ if self.config.channel == "nightly" && compiler.stage == 2 {
+ cargo.env("RUSTC_SAVE_ANALYSIS", "api".to_string());
+ }
+
// Environment variables *required* needed throughout the build
//
// FIXME: should update code to not require this env var
if self.config.rust_optimize && cmd != "bench" {
cargo.arg("--release");
}
- if self.config.vendor {
+ if self.config.vendor || self.is_sudo {
cargo.arg("--frozen");
}
return cargo
fn tool_cmd(&self, compiler: &Compiler, tool: &str) -> Command {
let mut cmd = Command::new(self.tool(&compiler, tool));
let host = compiler.host;
- let paths = vec![
+ let mut paths = vec![
self.cargo_out(compiler, Mode::Libstd, host).join("deps"),
self.cargo_out(compiler, Mode::Libtest, host).join("deps"),
self.cargo_out(compiler, Mode::Librustc, host).join("deps"),
self.cargo_out(compiler, Mode::Tool, host).join("deps"),
];
+
+ // On MSVC a tool may invoke a C compiler (e.g. compiletest in run-make
+ // mode) and that C compiler may need some extra PATH modification. Do
+ // so here.
+ if compiler.host.contains("msvc") {
+ let curpaths = env::var_os("PATH").unwrap_or(OsString::new());
+ let curpaths = env::split_paths(&curpaths).collect::<Vec<_>>();
+ for &(ref k, ref v) in self.cc[compiler.host].0.env() {
+ if k != "PATH" {
+ continue
+ }
+ for path in env::split_paths(v) {
+ if !curpaths.contains(&path) {
+ paths.push(path);
+ }
+ }
+ }
+ }
add_lib_path(paths, &mut cmd);
return cmd
}
add_lib_path(vec![self.rustc_libdir(compiler)], cmd);
}
+ /// Adds the `RUST_TEST_THREADS` env var if necessary
+ fn add_rust_test_threads(&self, cmd: &mut Command) {
+ if env::var_os("RUST_TEST_THREADS").is_none() {
+ cmd.env("RUST_TEST_THREADS", self.jobs().to_string());
+ }
+ }
+
/// Returns the compiler's libdir where it stores the dynamic libraries that
/// it itself links against.
///
$(Q)$(BOOTSTRAP) build $(BOOTSTRAP_ARGS)
$(Q)$(BOOTSTRAP) doc $(BOOTSTRAP_ARGS)
-# Don’t use $(Q) here, always show how to invoke the bootstrap script directly
help:
- $(BOOTSTRAP) --help
+ $(Q)echo 'Welcome to the rustbuild build system!'
+ $(Q)echo
+ $(Q)echo This makefile is a thin veneer over the ./x.py script located
+ $(Q)echo in this directory. To get the full power of the build system
+ $(Q)echo you can run x.py directly.
+ $(Q)echo
+ $(Q)echo To learn more run \`./x.py --help\`
clean:
$(Q)$(BOOTSTRAP) clean $(BOOTSTRAP_ARGS)
$(Q)$(BOOTSTRAP) test src/tools/cargotest $(BOOTSTRAP_ARGS)
dist:
$(Q)$(BOOTSTRAP) dist $(BOOTSTRAP_ARGS)
+distcheck:
+ $(Q)$(BOOTSTRAP) test distcheck
install:
-ifeq (root user, $(USER) $(patsubst %,user,$(SUDO_USER)))
- $(Q)echo "'sudo make install' is not supported currently."
-else
$(Q)$(BOOTSTRAP) dist --install $(BOOTSTRAP_ARGS)
-endif
tidy:
$(Q)$(BOOTSTRAP) test src/tools/tidy $(BOOTSTRAP_ARGS) --stage 0
-check-stage2-android:
- $(Q)$(BOOTSTRAP) --step check-target --target arm-linux-androideabi
+check-stage2-T-arm-linux-androideabi-H-x86_64-unknown-linux-gnu:
+ $(Q)$(BOOTSTRAP) test --target arm-linux-androideabi
+check-stage2-T-x86_64-unknown-linux-musl-H-x86_64-unknown-linux-gnu:
+ $(Q)$(BOOTSTRAP) test --target x86_64-unknown-linux-musl
+
.PHONY: dist
use gcc;
use Build;
-use util::up_to_date;
+use util::{self, up_to_date};
/// Compile LLVM for `target`.
pub fn llvm(build: &Build, target: &str) {
println!("Building LLVM for {}", target);
+ let _time = util::timeit();
let _ = fs::remove_dir_all(&dst.join("build"));
t!(fs::create_dir_all(&dst.join("build")));
let assertions = if build.config.llvm_assertions {"ON"} else {"OFF"};
println!("Building test helpers");
t!(fs::create_dir_all(&dst));
let mut cfg = gcc::Config::new();
+
+ // We may have found various cross-compilers a little differently due to our
+ // extra configuration, so inform gcc of these compilers. Note, though, that
+ // on MSVC we still need gcc's detection of env vars (ugh).
+ if !target.contains("msvc") {
+ if let Some(ar) = build.ar(target) {
+ cfg.archiver(ar);
+ }
+ cfg.compiler(build.cc(target));
+ }
+
cfg.cargo_metadata(false)
.out_dir(&dst)
.target(target)
}
}
let have_cmd = |cmd: &OsStr| {
- for path in env::split_paths(&path).map(|p| p.join(cmd)) {
- if fs::metadata(&path).is_ok() ||
- fs::metadata(path.with_extension("exe")).is_ok() {
- return Some(path);
+ for path in env::split_paths(&path) {
+ let target = path.join(cmd);
+ let mut cmd_alt = cmd.to_os_string();
+ cmd_alt.push(".exe");
+ if target.exists() ||
+ target.with_extension("exe").exists() ||
+ target.join(cmd_alt).exists() {
+ return Some(target);
}
}
return None;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+//! Definition of steps of the build system.
+//!
+//! This is where some of the real meat of rustbuild is located, in how we
+//! define targets and the dependencies amongst them. This file can sort of be
+//! viewed as just defining targets in a makefile which shell out to predefined
+//! functions elsewhere about how to execute the target.
+//!
+//! The primary function here you're likely interested in is the `build_rules`
+//! function. This will create a `Rules` structure which basically just lists
+//! everything that rustbuild can do. Each rule has a human-readable name, a
+//! path associated with it, some dependencies, and then a closure of how to
+//! actually perform the rule.
+//!
+//! All steps below are defined in self-contained units, so adding a new target
+//! to the build system should just involve adding the meta information here
+//! along with the actual implementation elsewhere. You can find more comments
+//! about how to define rules themselves below.
+
use std::collections::{HashMap, HashSet};
use std::mem;
use native;
use {Compiler, Build, Mode};
-#[derive(PartialEq, Eq, Hash, Clone, Debug)]
-struct Step<'a> {
- name: &'a str,
- stage: u32,
- host: &'a str,
- target: &'a str,
-}
-
-impl<'a> Step<'a> {
- fn name(&self, name: &'a str) -> Step<'a> {
- Step { name: name, ..*self }
- }
-
- fn stage(&self, stage: u32) -> Step<'a> {
- Step { stage: stage, ..*self }
- }
-
- fn host(&self, host: &'a str) -> Step<'a> {
- Step { host: host, ..*self }
- }
-
- fn target(&self, target: &'a str) -> Step<'a> {
- Step { target: target, ..*self }
- }
-
- fn compiler(&self) -> Compiler<'a> {
- Compiler::new(self.stage, self.host)
- }
-}
-
pub fn run(build: &Build) {
let rules = build_rules(build);
let steps = rules.plan();
}
pub fn build_rules(build: &Build) -> Rules {
- let mut rules: Rules = Rules::new(build);
+ let mut rules = Rules::new(build);
+
+ // This is the first rule that we're going to define for rustbuild, which is
+ // used to compile LLVM itself. All rules are added through the `rules`
+ // structure created above and are configured through a builder-style
+ // interface.
+ //
+ // First up we see the `build` method. This represents a rule that's part of
+ // the top-level `build` subcommand. For example `./x.py build` is what this
+ // is associating with. Note that this is normally only relevant if you flag
+ // a rule as `default`, which we'll talk about later.
+ //
+ // Next up we'll see two arguments to this method:
+ //
+ // * `llvm` - this is the "human readable" name of this target. This name is
+ // not accessed anywhere outside this file itself (e.g. not in
+ // the CLI nor elsewhere in rustbuild). The purpose of this is to
+ // easily define dependencies between rules. That is, other rules
+ // will depend on this with the name "llvm".
+ // * `src/llvm` - this is the relevant path to the rule that we're working
+ // with. This path is the engine behind how commands like
+ // `./x.py build src/llvm` work. This should typically point
+ // to the relevant component, but if there's not really a
+ // path to be assigned here you can pass something like
+ // `path/to/nowhere` to ignore it.
+ //
+ // After we create the rule with the `build` method we can then configure
+ // various aspects of it. For example this LLVM rule uses `.host(true)` to
+ // flag that it's a rule only for host targets. In other words, LLVM isn't
+ // compiled for targets configured through `--target` (e.g. those we're just
+ // building a standard library for).
+ //
+ // Next up the `dep` method will add a dependency to this rule. The closure
+ // is yielded the step that represents executing the `llvm` rule itself
+ // (containing information like stage, host, target, ...) and then it must
+ // return a target that the step depends on. Here LLVM is actually
+ // interesting where a cross-compiled LLVM depends on the host LLVM, but
+ // otherwise it has no dependencies.
+ //
+ // To handle this we do a bit of dynamic dispatch to see what the dependency
+ // is. If we're building a LLVM for the build triple, then we don't actually
+ // have any dependencies! To do that we return a dependency on the "dummy"
+ // target which does nothing.
+ //
+ // If we're building a cross-compiled LLVM, however, we need to assemble the
+ // libraries from the previous compiler. This step has the same name as
+ // ours (llvm) but we want it for a different target, so we use the
+ // builder-style methods on `Step` to configure this target to the build
+ // triple.
+ //
+ // Finally, to finish off this rule, we define how to actually execute it.
+ // That logic is all defined in the `native` module so we just delegate to
+ // the relevant function there. The argument to the closure passed to `run`
+ // is a `Step` (defined below) which encapsulates information like the
+ // stage, target, host, etc.
+ rules.build("llvm", "src/llvm")
+ .host(true)
+ .dep(move |s| {
+ if s.target == build.config.build {
+ dummy(s, build)
+ } else {
+ s.target(&build.config.build)
+ }
+ })
+ .run(move |s| native::llvm(build, s.target));
+
+ // Ok! After that example rule that's hopefully enough to explain what's
+ // going on here. You can check out the API docs below and also see a bunch
+ // more examples of rules directly below as well.
+
// dummy rule to do nothing, useful when a dep maps to no deps
rules.build("dummy", "path/to/nowhere");
- fn dummy<'a>(s: &Step<'a>, build: &'a Build) -> Step<'a> {
- s.name("dummy").stage(0)
- .target(&build.config.build)
- .host(&build.config.build)
- }
+
+ // the compiler with no target libraries ready to go
+ rules.build("rustc", "src/rustc")
+ .dep(move |s| {
+ if s.stage == 0 {
+ dummy(s, build)
+ } else {
+ s.name("librustc")
+ .host(&build.config.build)
+ .stage(s.stage - 1)
+ }
+ })
+ .run(move |s| compile::assemble_rustc(build, s.stage, s.target));
// Helper for loading an entire DAG of crates, rooted at `name`
let krates = |name: &str| {
return ret
};
- rules.build("rustc", "path/to/nowhere")
- .dep(move |s| {
- if s.stage == 0 {
- dummy(s, build)
- } else {
- s.name("librustc")
- .host(&build.config.build)
- .stage(s.stage - 1)
- }
- })
- .run(move |s| compile::assemble_rustc(build, s.stage, s.target));
- rules.build("llvm", "src/llvm")
- .host(true)
- .dep(move |s| {
- if s.target == build.config.build {
- dummy(s, build)
- } else {
- s.target(&build.config.build)
- }
- })
- .run(move |s| native::llvm(build, s.target));
-
// ========================================================================
// Crate compilations
//
.host(true)
.run(move |s| check::cargotest(build, s.stage, s.target));
rules.test("check-tidy", "src/tools/tidy")
- .dep(|s| s.name("tool-tidy"))
+ .dep(|s| s.name("tool-tidy").stage(0))
.default(true)
.host(true)
- .run(move |s| check::tidy(build, s.stage, s.target));
+ .run(move |s| check::tidy(build, 0, s.target));
rules.test("check-error-index", "src/tools/error_index_generator")
.dep(|s| s.name("libstd"))
.dep(|s| s.name("tool-error-index").host(s.host))
.default(true)
.host(true)
.run(move |s| check::docs(build, &s.compiler()));
+ rules.test("check-distcheck", "distcheck")
+ .dep(|s| s.name("dist-src"))
+ .run(move |_| check::distcheck(build));
+
rules.build("test-helpers", "src/rt/rust_test_helpers.c")
.run(move |s| native::test_helpers(build, s.target));
.default(true)
.dep(|s| s.name("default:doc"))
.run(move |s| dist::docs(build, s.stage, s.target));
+ rules.dist("dist-analysis", "src/libstd")
+ .dep(|s| s.name("dist-std"))
+ .default(true)
+ .run(move |s| dist::analysis(build, &s.compiler(), s.target));
rules.dist("install", "src")
.dep(|s| s.name("default:dist"))
.run(move |s| install::install(build, s.stage, s.target));
rules.verify();
- return rules
+ return rules;
+
+ fn dummy<'a>(s: &Step<'a>, build: &'a Build) -> Step<'a> {
+ s.name("dummy").stage(0)
+ .target(&build.config.build)
+ .host(&build.config.build)
+ }
+}
+
+#[derive(PartialEq, Eq, Hash, Clone, Debug)]
+struct Step<'a> {
+ /// Human readable name of the rule this step is executing. Possible names
+ /// are all defined above in `build_rules`.
+ name: &'a str,
+
+ /// The stage this step is executing in. This is typically 0, 1, or 2.
+ stage: u32,
+
+ /// This step will likely involve a compiler, and the target that compiler
+ /// itself is built for is called the host, this variable. Typically this is
+ /// the target of the build machine itself.
+ host: &'a str,
+
+ /// The target that this step represents generating. If you're building a
+ /// standard library for a new suite of targets, for example, this'll be set
+ /// to those targets.
+ target: &'a str,
+}
+
+impl<'a> Step<'a> {
+ /// Creates a new step which is the same as this, except has a new name.
+ fn name(&self, name: &'a str) -> Step<'a> {
+ Step { name: name, ..*self }
+ }
+
+ /// Creates a new step which is the same as this, except has a new stage.
+ fn stage(&self, stage: u32) -> Step<'a> {
+ Step { stage: stage, ..*self }
+ }
+
+ /// Creates a new step which is the same as this, except has a new host.
+ fn host(&self, host: &'a str) -> Step<'a> {
+ Step { host: host, ..*self }
+ }
+
+ /// Creates a new step which is the same as this, except has a new target.
+ fn target(&self, target: &'a str) -> Step<'a> {
+ Step { target: target, ..*self }
+ }
+
+ /// Returns the `Compiler` structure that this step corresponds to.
+ fn compiler(&self) -> Compiler<'a> {
+ Compiler::new(self.stage, self.host)
+ }
}
struct Rule<'a> {
+ /// The human readable name of this target, defined in `build_rules`.
name: &'a str,
+
+ /// The path associated with this target, used in the `./x.py` driver for
+ /// easy and ergonomic specification of what to do.
path: &'a str,
+
+ /// The "kind" of top-level command that this rule is associated with, only
+ /// relevant if this is a default rule.
kind: Kind,
+
+ /// List of dependencies this rule has. Each dependency is a function from a
+ /// step that's being executed to another step that should be executed.
deps: Vec<Box<Fn(&Step<'a>) -> Step<'a> + 'a>>,
+
+ /// How to actually execute this rule. Takes a step with contextual
+ /// information and then executes it.
run: Box<Fn(&Step<'a>) + 'a>,
+
+ /// Whether or not this is a "default" rule. That basically means that if
+ /// you run, for example, `./x.py test` whether it's included or not.
default: bool,
+
+ /// Whether or not this is a "host" rule, or in other words whether this is
+ /// only intended for compiler hosts and not for targets that are being
+ /// generated.
host: bool,
}
}
}
+/// Builder pattern returned from the various methods on `Rules` which will add
+/// the rule to the internal list on `Drop`.
struct RuleBuilder<'a: 'b, 'b> {
rules: &'b mut Rules<'a>,
rule: Rule<'a>,
}
}
+ /// Creates a new rule of `Kind::Build` with the specified human readable
+ /// name and path associated with it.
+ ///
+ /// The builder returned should be configured further with information such
+ /// as how to actually run this rule.
fn build<'b>(&'b mut self, name: &'a str, path: &'a str)
-> RuleBuilder<'a, 'b> {
self.rule(name, path, Kind::Build)
}
+ /// Same as `build`, but for `Kind::Test`.
fn test<'b>(&'b mut self, name: &'a str, path: &'a str)
-> RuleBuilder<'a, 'b> {
self.rule(name, path, Kind::Test)
}
+ /// Same as `build`, but for `Kind::Bench`.
fn bench<'b>(&'b mut self, name: &'a str, path: &'a str)
-> RuleBuilder<'a, 'b> {
self.rule(name, path, Kind::Bench)
}
+ /// Same as `build`, but for `Kind::Doc`.
fn doc<'b>(&'b mut self, name: &'a str, path: &'a str)
-> RuleBuilder<'a, 'b> {
self.rule(name, path, Kind::Doc)
}
+ /// Same as `build`, but for `Kind::Dist`.
fn dist<'b>(&'b mut self, name: &'a str, path: &'a str)
-> RuleBuilder<'a, 'b> {
self.rule(name, path, Kind::Dist)
/// Construct the top-level build steps that we're going to be executing,
/// given the subcommand that our build is performing.
fn plan(&self) -> Vec<Step<'a>> {
+ // Ok, the logic here is pretty subtle, and involves quite a few
+ // conditionals. The basic idea here is to:
+ //
+ // 1. First, filter all our rules to the relevant ones. This means that
+ // the command specified corresponds to one of our `Kind` variants,
+ // and we filter all rules based on that.
+ //
+ // 2. Next, we determine which rules we're actually executing. If a
+ // number of path filters were specified on the command line we look
+ // for those, otherwise we look for anything tagged `default`.
+ //
+ // 3. Finally, we generate some steps with host and target information.
+ //
+ // The last step is by far the most complicated and subtle. The basic
+ // thinking here is that we want to take the cartesian product of
+ // specified hosts and targets and build rules with that. The list of
+ // hosts and targets, if not specified, come from the how this build was
+ // configured. If the rule we're looking at is a host-only rule the we
+ // ignore the list of targets and instead consider the list of hosts
+ // also the list of targets.
+ //
+ // Once the host and target lists are generated we take the cartesian
+ // product of the two and then create a step based off them. Note that
+ // the stage each step is associated with was specified with the `--step`
+ // flag on the command line.
let (kind, paths) = match self.build.flags.cmd {
Subcommand::Build { ref paths } => (Kind::Build, &paths[..]),
Subcommand::Doc { ref paths } => (Kind::Doc, &paths[..]),
} else {
&self.build.config.target
};
- let arr = if rule.host {hosts} else {targets};
+ // If --target was specified but --host wasn't specified, don't run
+ // any host-only tests
+ let arr = if rule.host {
+ if self.build.flags.target.len() > 0 &&
+ self.build.flags.host.len() == 0 {
+ &hosts[..0]
+ } else {
+ hosts
+ }
+ } else {
+ targets
+ };
hosts.iter().flat_map(move |host| {
arr.iter().map(move |target| {
}
}
+ /// Performs topological sort of dependencies rooted at the `step`
+ /// specified, pushing all results onto the `order` vector provided.
+ ///
+ /// In other words, when this method returns, the `order` vector will
+ /// contain a list of steps which if executed in order will eventually
+ /// complete the `step` specified as well.
+ ///
+ /// The `added` set specified here is the set of steps that are already
+ /// present in `order` (and hence don't need to be added again).
fn fill(&self,
step: Step<'a>,
order: &mut Vec<Step<'a>>,
use std::fs;
use std::path::{Path, PathBuf};
use std::process::Command;
+use std::time::Instant;
use filetime::FileTime;
buf
}
+
+pub struct TimeIt(Instant);
+
+/// Returns an RAII structure that prints out how long it took to drop.
+pub fn timeit() -> TimeIt {
+ TimeIt(Instant::now())
+}
+
+impl Drop for TimeIt {
+ fn drop(&mut self) {
+ let time = self.0.elapsed();
+ println!("\tfinished in {}.{:03}",
+ time.as_secs(),
+ time.subsec_nanos() / 1_000_000);
+ }
+}
curl \
ca-certificates \
python2.7 \
- python-minimal \
git \
cmake \
ccache \
--arm-linux-androideabi-ndk=/android/ndk-arm-9 \
--armv7-linux-androideabi-ndk=/android/ndk-arm-9 \
--i686-linux-android-ndk=/android/ndk-x86-9 \
- --aarch64-linux-android-ndk=/android/ndk-aarch64 \
- --enable-rustbuild
-ENV RUST_CHECK_TARGET check-stage2-android
+ --aarch64-linux-android-ndk=/android/ndk-aarch64
+ENV XPY_CHECK test --target arm-linux-androideabi
RUN mkdir /tmp/obj
RUN chmod 777 /tmp/obj
curl \
ca-certificates \
python2.7 \
- python-minimal \
git \
cmake \
ccache \
src_dir="`dirname $ci_dir`"
root_dir="`dirname $src_dir`"
-docker build \
+docker \
+ build \
--rm \
-t rust-ci \
"`dirname "$script"`/$image"
mkdir -p $HOME/.ccache
mkdir -p $HOME/.cargo
+mkdir -p $root_dir/obj
-exec docker run \
+exec docker \
+ run \
--volume "$root_dir:/checkout:ro" \
- --workdir /tmp/obj \
+ --volume "$root_dir/obj:/checkout/obj" \
+ --workdir /checkout/obj \
--env SRC=/checkout \
--env CCACHE_DIR=/ccache \
--volume "$HOME/.ccache:/ccache" \
curl \
ca-certificates \
python2.7 \
- python-minimal \
git \
cmake \
ccache \
AR_x86_64_unknown_freebsd=x86_64-unknown-freebsd10-ar \
CC_x86_64_unknown_freebsd=x86_64-unknown-freebsd10-gcc
-ENV RUST_CONFIGURE_ARGS --target=x86_64-unknown-freebsd --enable-rustbuild
+ENV RUST_CONFIGURE_ARGS --target=x86_64-unknown-freebsd
ENV RUST_CHECK_TARGET ""
RUN mkdir /tmp/obj
RUN chmod 777 /tmp/obj
curl \
ca-certificates \
python2.7 \
- python-minimal \
git \
cmake \
ccache \
libssl-dev \
sudo
-ENV RUST_CONFIGURE_ARGS --build=x86_64-unknown-linux-gnu --enable-rustbuild
+ENV RUST_CONFIGURE_ARGS --build=x86_64-unknown-linux-gnu
ENV RUST_CHECK_TARGET check-cargotest
+ENV NO_VENDOR 1
RUN mkdir /tmp/obj
RUN chmod 777 /tmp/obj
curl \
ca-certificates \
python2.7 \
- python2.7-minimal \
git \
cmake \
ccache \
ENV RUST_CONFIGURE_ARGS \
--build=x86_64-unknown-linux-gnu \
- --enable-rustbuild \
--llvm-root=/usr/lib/llvm-3.7
ENV RUST_CHECK_TARGET check
RUN mkdir /tmp/obj
--- /dev/null
+FROM ubuntu:16.04
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ g++ \
+ make \
+ file \
+ curl \
+ ca-certificates \
+ python2.7 \
+ git \
+ cmake \
+ ccache \
+ sudo \
+ gdb
+
+ENV RUST_CONFIGURE_ARGS --build=x86_64-unknown-linux-gnu --disable-rustbuild
+ENV RUST_CHECK_TARGET check
+RUN mkdir /tmp/obj
+RUN chmod 777 /tmp/obj
+++ /dev/null
-FROM ubuntu:16.04
-
-RUN apt-get update && apt-get install -y --no-install-recommends \
- g++ \
- make \
- file \
- curl \
- ca-certificates \
- python2.7 \
- python-minimal \
- git \
- cmake \
- ccache \
- sudo \
- gdb
-
-ENV RUST_CONFIGURE_ARGS --build=x86_64-unknown-linux-gnu --enable-rustbuild
-ENV RUST_CHECK_TARGET check
-RUN mkdir /tmp/obj
-RUN chmod 777 /tmp/obj
ENV RUST_CONFIGURE_ARGS \
--target=x86_64-unknown-linux-musl \
- --musl-root=/musl-x86_64
+ --musl-root-x86_64=/musl-x86_64
ENV RUST_CHECK_TARGET check-stage2-T-x86_64-unknown-linux-musl-H-x86_64-unknown-linux-gnu
+ENV PATH=$PATH:/musl-x86_64/bin
+ENV XPY_CHECK test --target x86_64-unknown-linux-musl
RUN mkdir /tmp/obj
RUN chmod 777 /tmp/obj
if [ "$LOCAL_USER_ID" != "" ]; then
useradd --shell /bin/bash -u $LOCAL_USER_ID -o -c "" -m user
export HOME=/home/user
- export LOCAL_USER_ID=
- exec sudo -E -u user env PATH=$PATH "$0"
+ unset LOCAL_USER_ID
+ exec su --preserve-environment -c "env PATH=$PATH \"$0\"" user
fi
if [ "$NO_LLVM_ASSERTIONS" = "" ]; then
- LLVM_ASSERTIONS=--enable-llvm-assertions
+ ENABLE_LLVM_ASSERTIONS=--enable-llvm-assertions
+fi
+
+if [ "$NO_VENDOR" = "" ]; then
+ ENABLE_VENDOR=--enable-vendor
+fi
+
+if [ "$NO_CCACHE" = "" ]; then
+ ENABLE_CCACHE=--enable-ccache
fi
set -ex
--disable-manage-submodules \
--enable-debug-assertions \
--enable-quiet-tests \
- --enable-ccache \
- --enable-vendor \
- $LLVM_ASSERTIONS \
+ $ENABLE_CCACHE \
+ $ENABLE_VENDOR \
+ $ENABLE_LLVM_ASSERTIONS \
$RUST_CONFIGURE_ARGS
if [ "$TRAVIS_OS_NAME" = "osx" ]; then
make -j $ncpus tidy
make -j $ncpus
-exec make $RUST_CHECK_TARGET -j $ncpus
+if [ ! -z "$XPY_CHECK" ]; then
+ exec python2.7 $SRC/x.py $XPY_CHECK
+else
+ exec make $RUST_CHECK_TARGET -j $ncpus
+fi
It’s important to be mindful of `panic!`s when working with FFI. A `panic!`
across an FFI boundary is undefined behavior. If you’re writing code that may
-panic, you should run it in another thread, so that the panic doesn’t bubble up
-to C:
+panic, you should run it in a closure with [`catch_unwind()`]:
```rust
-use std::thread;
+use std::panic::catch_unwind;
#[no_mangle]
pub extern fn oh_no() -> i32 {
- let h = thread::spawn(|| {
+ let result = catch_unwind(|| {
panic!("Oops!");
});
-
- match h.join() {
- Ok(_) => 1,
- Err(_) => 0,
+ match result {
+ Ok(_) => 0,
+ Err(_) => 1,
}
}
-# fn main() {}
+
+fn main() {}
```
+Please note that [`catch_unwind()`] will only catch unwinding panics, not
+those that abort the process. See the documentation of [`catch_unwind()`]
+for more information.
+
+[`catch_unwind()`]: https://doc.rust-lang.org/std/panic/fn.catch_unwind.html
+
# Representing opaque structs
Sometimes, a C library wants to provide a pointer to something, but not let you
* `ty`: a [type](#types)
* `ident`: an [identifier](#identifiers)
* `path`: a [path](#paths)
-* `tt`: either side of the `=>` in macro rules
+* `tt`: a token tree (a single [token](#tokens) or a sequence of token trees surrounded
+ by matching `()`, `[]`, or `{}`)
* `meta`: the contents of an [attribute](#attributes)
In the transcriber, the
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> {}
+impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> {
+ fn len(&self) -> usize {
+ (**self).len()
+ }
+ fn is_empty(&self) -> bool {
+ (**self).is_empty()
+ }
+}
#[unstable(feature = "fused", issue = "35602")]
impl<I: FusedIterator + ?Sized> FusedIterator for Box<I> {}
#![feature(allocator)]
#![feature(box_syntax)]
+#![feature(cfg_target_has_atomic)]
#![feature(coerce_unsized)]
#![feature(const_fn)]
#![feature(core_intrinsics)]
#![feature(custom_attribute)]
#![feature(dropck_parametricity)]
+#![cfg_attr(not(test), feature(exact_size_is_empty))]
#![feature(fundamental)]
#![feature(lang_items)]
#![feature(needs_allocator)]
}
#[cfg(test)]
mod boxed_test;
+#[cfg(target_has_atomic = "ptr")]
pub mod arc;
pub mod rc;
pub mod raw_vec;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use core::sync::atomic::{AtomicPtr, Ordering};
-use core::mem;
+#[cfg(target_has_atomic = "ptr")]
+pub use self::imp::set_oom_handler;
use core::intrinsics;
-static OOM_HANDLER: AtomicPtr<()> = AtomicPtr::new(default_oom_handler as *mut ());
-
fn default_oom_handler() -> ! {
// The default handler can't do much more since we can't assume the presence
// of libc or any way of printing an error message.
#[unstable(feature = "oom", reason = "not a scrutinized interface",
issue = "27700")]
pub fn oom() -> ! {
- let value = OOM_HANDLER.load(Ordering::SeqCst);
- let handler: fn() -> ! = unsafe { mem::transmute(value) };
- handler();
+ self::imp::oom()
}
-/// Set a custom handler for out-of-memory conditions
-///
-/// To avoid recursive OOM failures, it is critical that the OOM handler does
-/// not allocate any memory itself.
-#[unstable(feature = "oom", reason = "not a scrutinized interface",
- issue = "27700")]
-pub fn set_oom_handler(handler: fn() -> !) {
- OOM_HANDLER.store(handler as *mut (), Ordering::SeqCst);
+#[cfg(target_has_atomic = "ptr")]
+mod imp {
+ use core::mem;
+ use core::sync::atomic::{AtomicPtr, Ordering};
+
+ static OOM_HANDLER: AtomicPtr<()> = AtomicPtr::new(super::default_oom_handler as *mut ());
+
+ #[inline(always)]
+ pub fn oom() -> ! {
+ let value = OOM_HANDLER.load(Ordering::SeqCst);
+ let handler: fn() -> ! = unsafe { mem::transmute(value) };
+ handler();
+ }
+
+ /// Set a custom handler for out-of-memory conditions
+ ///
+ /// To avoid recursive OOM failures, it is critical that the OOM handler does
+ /// not allocate any memory itself.
+ #[unstable(feature = "oom", reason = "not a scrutinized interface",
+ issue = "27700")]
+ pub fn set_oom_handler(handler: fn() -> !) {
+ OOM_HANDLER.store(handler as *mut (), Ordering::SeqCst);
+ }
+}
+
+#[cfg(not(target_has_atomic = "ptr"))]
+mod imp {
+ #[inline(always)]
+ pub fn oom() -> ! {
+ super::default_oom_handler()
+ }
}
//! Single-threaded reference-counting pointers.
//!
-//! The type [`Rc<T>`][rc] provides shared ownership of a value of type `T`,
-//! allocated in the heap. Invoking [`clone`][clone] on `Rc` produces a new
-//! pointer to the same value in the heap. When the last `Rc` pointer to a
+//! The type [`Rc<T>`][`Rc`] provides shared ownership of a value of type `T`,
+//! allocated in the heap. Invoking [`clone()`][clone] on [`Rc`] produces a new
+//! pointer to the same value in the heap. When the last [`Rc`] pointer to a
//! given value is destroyed, the pointed-to value is also destroyed.
//!
//! Shared references in Rust disallow mutation by default, and `Rc` is no
-//! exception. If you need to mutate through an `Rc`, use [`Cell`][cell] or
-//! [`RefCell`][refcell].
+//! exception. If you need to mutate through an [`Rc`], use [`Cell`] or
+//! [`RefCell`].
//!
-//! `Rc` uses non-atomic reference counting. This means that overhead is very
-//! low, but an `Rc` cannot be sent between threads, and consequently `Rc`
+//! [`Rc`] uses non-atomic reference counting. This means that overhead is very
+//! low, but an [`Rc`] cannot be sent between threads, and consequently [`Rc`]
//! does not implement [`Send`][send]. As a result, the Rust compiler
-//! will check *at compile time* that you are not sending `Rc`s between
+//! will check *at compile time* that you are not sending [`Rc`]s between
//! threads. If you need multi-threaded, atomic reference counting, use
//! [`sync::Arc`][arc].
//!
-//! The [`downgrade`][downgrade] method can be used to create a non-owning
-//! [`Weak`][weak] pointer. A `Weak` pointer can be [`upgrade`][upgrade]d
-//! to an `Rc`, but this will return [`None`][option] if the value has
+//! The [`downgrade()`][downgrade] method can be used to create a non-owning
+//! [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
+//! to an [`Rc`], but this will return [`None`] if the value has
//! already been dropped.
//!
-//! A cycle between `Rc` pointers will never be deallocated. For this reason,
-//! `Weak` is used to break cycles. For example, a tree could have strong
-//! `Rc` pointers from parent nodes to children, and `Weak` pointers from
+//! A cycle between [`Rc`] pointers will never be deallocated. For this reason,
+//! [`Weak`] is used to break cycles. For example, a tree could have strong
+//! [`Rc`] pointers from parent nodes to children, and [`Weak`] pointers from
//! children back to their parents.
//!
-//! `Rc<T>` automatically dereferences to `T` (via the [`Deref`][deref] trait),
-//! so you can call `T`'s methods on a value of type `Rc<T>`. To avoid name
-//! clashes with `T`'s methods, the methods of `Rc<T>` itself are [associated
+//! `Rc<T>` automatically dereferences to `T` (via the [`Deref`] trait),
+//! so you can call `T`'s methods on a value of type [`Rc<T>`][`Rc`]. To avoid name
+//! clashes with `T`'s methods, the methods of [`Rc<T>`][`Rc`] itself are [associated
//! functions][assoc], called using function-like syntax:
//!
//! ```
//! Rc::downgrade(&my_rc);
//! ```
//!
-//! `Weak<T>` does not auto-dereference to `T`, because the value may have
+//! [`Weak<T>`][`Weak`] does not auto-dereference to `T`, because the value may have
//! already been destroyed.
//!
-//! [rc]: struct.Rc.html
-//! [weak]: struct.Weak.html
-//! [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
-//! [cell]: ../../std/cell/struct.Cell.html
-//! [refcell]: ../../std/cell/struct.RefCell.html
-//! [send]: ../../std/marker/trait.Send.html
-//! [arc]: ../../std/sync/struct.Arc.html
-//! [deref]: ../../std/ops/trait.Deref.html
-//! [downgrade]: struct.Rc.html#method.downgrade
-//! [upgrade]: struct.Weak.html#method.upgrade
-//! [option]: ../../std/option/enum.Option.html
-//! [assoc]: ../../book/method-syntax.html#associated-functions
-//!
//! # Examples
//!
//! Consider a scenario where a set of `Gadget`s are owned by a given `Owner`.
//! We want to have our `Gadget`s point to their `Owner`. We can't do this with
//! unique ownership, because more than one gadget may belong to the same
-//! `Owner`. `Rc` allows us to share an `Owner` between multiple `Gadget`s,
+//! `Owner`. [`Rc`] allows us to share an `Owner` between multiple `Gadget`s,
//! and have the `Owner` remain allocated as long as any `Gadget` points at it.
//!
//! ```
//! ```
//!
//! If our requirements change, and we also need to be able to traverse from
-//! `Owner` to `Gadget`, we will run into problems. An `Rc` pointer from `Owner`
+//! `Owner` to `Gadget`, we will run into problems. An [`Rc`] pointer from `Owner`
//! to `Gadget` introduces a cycle between the values. This means that their
//! reference counts can never reach 0, and the values will remain allocated
-//! forever: a memory leak. In order to get around this, we can use `Weak`
+//! forever: a memory leak. In order to get around this, we can use [`Weak`]
//! pointers.
//!
//! Rust actually makes it somewhat difficult to produce this loop in the first
//! place. In order to end up with two values that point at each other, one of
-//! them needs to be mutable. This is difficult because `Rc` enforces
+//! them needs to be mutable. This is difficult because [`Rc`] enforces
//! memory safety by only giving out shared references to the value it wraps,
//! and these don't allow direct mutation. We need to wrap the part of the
-//! value we wish to mutate in a [`RefCell`][refcell], which provides *interior
+//! value we wish to mutate in a [`RefCell`], which provides *interior
//! mutability*: a method to achieve mutability through a shared reference.
-//! `RefCell` enforces Rust's borrowing rules at runtime.
+//! [`RefCell`] enforces Rust's borrowing rules at runtime.
//!
//! ```
//! use std::rc::Rc;
//! // Gadget Man, so he gets destroyed as well.
//! }
//! ```
+//!
+//! [`Rc`]: struct.Rc.html
+//! [`Weak`]: struct.Weak.html
+//! [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
+//! [`Cell`]: ../../std/cell/struct.Cell.html
+//! [`RefCell`]: ../../std/cell/struct.RefCell.html
+//! [send]: ../../std/marker/trait.Send.html
+//! [arc]: ../../std/sync/struct.Arc.html
+//! [`Deref`]: ../../std/ops/trait.Deref.html
+//! [downgrade]: struct.Rc.html#method.downgrade
+//! [upgrade]: struct.Weak.html#method.upgrade
+//! [`None`]: ../../std/option/enum.Option.html#variant.None
+//! [assoc]: ../../book/method-syntax.html#associated-functions
#![stable(feature = "rust1", since = "1.0.0")]
/// See the [module-level documentation](./index.html) for more details.
///
/// The inherent methods of `Rc` are all associated functions, which means
-/// that you have to call them as e.g. `Rc::get_mut(&value)` instead of
-/// `value.get_mut()`. This avoids conflicts with methods of the inner
+/// that you have to call them as e.g. [`Rc::get_mut(&value)`][get_mut] instead of
+/// `value.get_mut()`. This avoids conflicts with methods of the inner
/// type `T`.
+///
+/// [get_mut]: #method.get_mut
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Rc<T: ?Sized> {
ptr: Shared<RcBox<T>>,
}
/// Checks whether [`Rc::try_unwrap`][try_unwrap] would return
- /// [`Ok`][result].
+ /// [`Ok`].
///
/// [try_unwrap]: struct.Rc.html#method.try_unwrap
- /// [result]: ../../std/result/enum.Result.html
+ /// [`Ok`]: ../../std/result/enum.Result.html#variant.Ok
///
/// # Examples
///
/// Returns a mutable reference to the inner value, if there are
/// no other `Rc` or [`Weak`][weak] pointers to the same value.
///
- /// Returns [`None`][option] otherwise, because it is not safe to
+ /// Returns [`None`] otherwise, because it is not safe to
/// mutate a shared value.
///
/// See also [`make_mut`][make_mut], which will [`clone`][clone]
/// the inner value when it's shared.
///
/// [weak]: struct.Weak.html
- /// [option]: ../../std/option/enum.Option.html
+ /// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [make_mut]: struct.Rc.html#method.make_mut
/// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
///
cmd.arg(format!("--build={}", build_helper::gnu_target(&host)));
run(&mut cmd);
- run(Command::new("make")
- .current_dir(&build_dir)
- .arg("build_lib_static")
- .arg("-j")
- .arg(env::var("NUM_JOBS").expect("NUM_JOBS was not set")));
+ let mut make = Command::new("make");
+ make.current_dir(&build_dir)
+ .arg("build_lib_static");
+
+ // mingw make seems... buggy? unclear...
+ if !host.contains("windows") {
+ make.arg("-j")
+ .arg(env::var("NUM_JOBS").expect("NUM_JOBS was not set"));
+ }
+
+ run(&mut make);
if target.contains("windows") {
println!("cargo:rustc-link-lib=static=jemalloc");
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, T> ExactSizeIterator for Iter<'a, T> {}
+impl<'a, T> ExactSizeIterator for Iter<'a, T> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
#[unstable(feature = "fused", issue = "35602")]
impl<'a, T> FusedIterator for Iter<'a, T> {}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> ExactSizeIterator for IntoIter<T> {}
+impl<T> ExactSizeIterator for IntoIter<T> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
#[unstable(feature = "fused", issue = "35602")]
impl<T> FusedIterator for IntoIter<T> {}
}
#[stable(feature = "drain", since = "1.6.0")]
-impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {}
+impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
#[unstable(feature = "fused", issue = "35602")]
impl<'a, T: 'a> FusedIterator for Drain<'a, T> {}
#![feature(placement_in)]
#![feature(placement_new_protocol)]
#![feature(shared)]
+#![feature(slice_get_slice)]
#![feature(slice_patterns)]
#![feature(specialization)]
#![feature(staged_api)]
-#![feature(step_by)]
#![feature(trusted_len)]
#![feature(unicode)]
#![feature(unique)]
-#![feature(slice_get_slice)]
+#![feature(untagged_unions)]
#![cfg_attr(test, feature(rand, test))]
#![no_std]
#![cfg_attr(test, allow(unused_imports, dead_code))]
use alloc::boxed::Box;
-use core::cmp::Ordering::{self, Greater, Less};
-use core::cmp;
+use core::cmp::Ordering::{self, Greater};
use core::mem::size_of;
use core::mem;
use core::ptr;
/// This is equivalent to `self.sort_by(|a, b| a.cmp(b))`.
///
- /// This sort is stable and `O(n log n)` worst-case but allocates
- /// approximately `2 * n` where `n` is the length of `self`.
+ /// This sort is stable and `O(n log n)` worst-case, but allocates
+ /// temporary storage half the size of `self`.
///
/// # Examples
///
/// Sorts the slice, in place, using `f` to extract a key by which to
/// order the sort by.
///
- /// This sort is stable and `O(n log n)` worst-case but allocates
- /// approximately `2 * n`, where `n` is the length of `self`.
+ /// This sort is stable and `O(n log n)` worst-case, but allocates
+ /// temporary storage half the size of `self`.
///
/// # Examples
///
/// Sorts the slice, in place, using `compare` to compare
/// elements.
///
- /// This sort is stable and `O(n log n)` worst-case but allocates
- /// approximately `2 * n`, where `n` is the length of `self`.
+ /// This sort is stable and `O(n log n)` worst-case, but allocates
+ /// temporary storage half the size of `self`.
///
/// # Examples
///
// Sorting
////////////////////////////////////////////////////////////////////////////////
-fn insertion_sort<T, F>(v: &mut [T], mut compare: F)
+/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that the whole `v[..]` becomes sorted.
+///
+/// This is the integral subroutine of insertion sort.
+fn insert_head<T, F>(v: &mut [T], compare: &mut F)
where F: FnMut(&T, &T) -> Ordering
{
- let len = v.len() as isize;
- let buf_v = v.as_mut_ptr();
-
- // 1 <= i < len;
- for i in 1..len {
- // j satisfies: 0 <= j <= i;
- let mut j = i;
+ if v.len() >= 2 && compare(&v[0], &v[1]) == Greater {
unsafe {
- // `i` is in bounds.
- let read_ptr = buf_v.offset(i) as *const T;
-
- // find where to insert, we need to do strict <,
- // rather than <=, to maintain stability.
-
- // 0 <= j - 1 < len, so .offset(j - 1) is in bounds.
- while j > 0 && compare(&*read_ptr, &*buf_v.offset(j - 1)) == Less {
- j -= 1;
+ // There are three ways to implement insertion here:
+ //
+ // 1. Swap adjacent elements until the first one gets to its final destination.
+ // However, this way we copy data around more than is necessary. If elements are big
+ // structures (costly to copy), this method will be slow.
+ //
+ // 2. Iterate until the right place for the first element is found. Then shift the
+ // elements succeeding it to make room for it and finally place it into the
+ // remaining hole. This is a good method.
+ //
+ // 3. Copy the first element into a temporary variable. Iterate until the right place
+ // for it is found. As we go along, copy every traversed element into the slot
+ // preceding it. Finally, copy data from the temporary variable into the remaining
+ // hole. This method is very good. Benchmarks demonstrated slightly better
+ // performance than with the 2nd method.
+ //
+ // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
+ let mut tmp = NoDrop { value: ptr::read(&v[0]) };
+
+ // Intermediate state of the insertion process is always tracked by `hole`, which
+ // serves two purposes:
+ // 1. Protects integrity of `v` from panics in `compare`.
+ // 2. Fills the remaining hole in `v` in the end.
+ //
+ // Panic safety:
+ //
+ // If `compare` panics at any point during the process, `hole` will get dropped and
+ // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
+ // initially held exactly once.
+ let mut hole = InsertionHole {
+ src: &mut tmp.value,
+ dest: &mut v[1],
+ };
+ ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
+
+ for i in 2..v.len() {
+ if compare(&tmp.value, &v[i]) != Greater {
+ break;
+ }
+ ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
+ hole.dest = &mut v[i];
}
+ // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
+ }
+ }
- // shift everything to the right, to make space to
- // insert this value.
+ // Holds a value, but never drops it.
+ #[allow(unions_with_drop_fields)]
+ union NoDrop<T> {
+ value: T
+ }
- // j + 1 could be `len` (for the last `i`), but in
- // that case, `i == j` so we don't copy. The
- // `.offset(j)` is always in bounds.
+ // When dropped, copies from `src` into `dest`.
+ struct InsertionHole<T> {
+ src: *mut T,
+ dest: *mut T,
+ }
- if i != j {
- let tmp = ptr::read(read_ptr);
- ptr::copy(&*buf_v.offset(j), buf_v.offset(j + 1), (i - j) as usize);
- ptr::copy_nonoverlapping(&tmp, buf_v.offset(j), 1);
- mem::forget(tmp);
- }
+ impl<T> Drop for InsertionHole<T> {
+ fn drop(&mut self) {
+ unsafe { ptr::copy_nonoverlapping(self.src, self.dest, 1); }
}
}
}
-fn merge_sort<T, F>(v: &mut [T], mut compare: F)
+/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
+/// stores the result into `v[..]`.
+///
+/// # Safety
+///
+/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
+/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
+unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, compare: &mut F)
where F: FnMut(&T, &T) -> Ordering
{
- // warning: this wildly uses unsafe.
- const BASE_INSERTION: usize = 32;
- const LARGE_INSERTION: usize = 16;
-
- // FIXME #12092: smaller insertion runs seems to make sorting
- // vectors of large elements a little faster on some platforms,
- // but hasn't been tested/tuned extensively
- let insertion = if size_of::<T>() <= 16 {
- BASE_INSERTION
+ let len = v.len();
+ let v = v.as_mut_ptr();
+ let v_mid = v.offset(mid as isize);
+ let v_end = v.offset(len as isize);
+
+ // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
+ // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
+ // copying the lesser (or greater) one into `v`.
+ //
+ // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
+ // consumed first, then we must copy whatever is left of the shorter run into the remaining
+ // hole in `v`.
+ //
+ // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
+ // 1. Protects integrity of `v` from panics in `compare`.
+ // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
+ //
+ // Panic safety:
+ //
+ // If `compare` panics at any point during the process, `hole` will get dropped and fill the
+ // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
+ // object it initially held exactly once.
+ let mut hole;
+
+ if mid <= len - mid {
+ // The left run is shorter.
+ ptr::copy_nonoverlapping(v, buf, mid);
+ hole = MergeHole {
+ start: buf,
+ end: buf.offset(mid as isize),
+ dest: v,
+ };
+
+ // Initially, these pointers point to the beginnings of their arrays.
+ let left = &mut hole.start;
+ let mut right = v_mid;
+ let out = &mut hole.dest;
+
+ while *left < hole.end && right < v_end {
+ // Consume the lesser side.
+ // If equal, prefer the left run to maintain stability.
+ let to_copy = if compare(&**left, &*right) == Greater {
+ get_and_increment(&mut right)
+ } else {
+ get_and_increment(left)
+ };
+ ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
+ }
} else {
- LARGE_INSERTION
- };
+ // The right run is shorter.
+ ptr::copy_nonoverlapping(v_mid, buf, len - mid);
+ hole = MergeHole {
+ start: buf,
+ end: buf.offset((len - mid) as isize),
+ dest: v_mid,
+ };
+
+ // Initially, these pointers point past the ends of their arrays.
+ let left = &mut hole.dest;
+ let right = &mut hole.end;
+ let mut out = v_end;
+
+ while v < *left && buf < *right {
+ // Consume the greater side.
+ // If equal, prefer the right run to maintain stability.
+ let to_copy = if compare(&*left.offset(-1), &*right.offset(-1)) == Greater {
+ decrement_and_get(left)
+ } else {
+ decrement_and_get(right)
+ };
+ ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
+ }
+ }
+ // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
+ // it will now be copied into the hole in `v`.
- let len = v.len();
+ unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
+ let old = *ptr;
+ *ptr = ptr.offset(1);
+ old
+ }
- // short vectors get sorted in-place via insertion sort to avoid allocations
- if len <= insertion {
- insertion_sort(v, compare);
- return;
+ unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
+ *ptr = ptr.offset(-1);
+ *ptr
}
- // allocate some memory to use as scratch memory, we keep the
- // length 0 so we can keep shallow copies of the contents of `v`
- // without risking the dtors running on an object twice if
- // `compare` panics.
- let mut working_space = Vec::with_capacity(2 * len);
- // these both are buffers of length `len`.
- let mut buf_dat = working_space.as_mut_ptr();
- let mut buf_tmp = unsafe { buf_dat.offset(len as isize) };
-
- // length `len`.
- let buf_v = v.as_ptr();
-
- // step 1. sort short runs with insertion sort. This takes the
- // values from `v` and sorts them into `buf_dat`, leaving that
- // with sorted runs of length INSERTION.
-
- // We could hardcode the sorting comparisons here, and we could
- // manipulate/step the pointers themselves, rather than repeatedly
- // .offset-ing.
- for start in (0..len).step_by(insertion) {
- // start <= i < len;
- for i in start..cmp::min(start + insertion, len) {
- // j satisfies: start <= j <= i;
- let mut j = i as isize;
- unsafe {
- // `i` is in bounds.
- let read_ptr = buf_v.offset(i as isize);
+ // When dropped, copies the range `start..end` into `dest..`.
+ struct MergeHole<T> {
+ start: *mut T,
+ end: *mut T,
+ dest: *mut T,
+ }
+
+ impl<T> Drop for MergeHole<T> {
+ fn drop(&mut self) {
+            // `T` is not a zero-sized type, so it's okay to divide by its size.
+ let len = (self.end as usize - self.start as usize) / mem::size_of::<T>();
+ unsafe { ptr::copy_nonoverlapping(self.start, self.dest, len); }
+ }
+ }
+}
- // find where to insert, we need to do strict <,
- // rather than <=, to maintain stability.
+/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
+/// [here](http://svn.python.org/projects/python/trunk/Objects/listsort.txt).
+///
+/// The algorithm identifies strictly descending and non-descending subsequences, which are called
+/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
+/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
+/// satisfied, for every `i` in `0 .. runs.len() - 2`:
+///
+/// 1. `runs[i].len > runs[i + 1].len`
+/// 2. `runs[i].len > runs[i + 1].len + runs[i + 2].len`
+///
+/// The invariants ensure that the total running time is `O(n log n)` worst-case.
+fn merge_sort<T, F>(v: &mut [T], mut compare: F)
+ where F: FnMut(&T, &T) -> Ordering
+{
+ // Sorting has no meaningful behavior on zero-sized types.
+ if size_of::<T>() == 0 {
+ return;
+ }
- // start <= j - 1 < len, so .offset(j - 1) is in
- // bounds.
- while j > start as isize && compare(&*read_ptr, &*buf_dat.offset(j - 1)) == Less {
- j -= 1;
- }
+ // FIXME #12092: These numbers are platform-specific and need more extensive testing/tuning.
+ //
+ // If `v` has length up to `insertion_len`, simply switch to insertion sort because it is going
+ // to perform better than merge sort. For bigger types `T`, the threshold is smaller.
+ //
+ // Short runs are extended using insertion sort to span at least `min_run` elements, in order
+ // to improve performance.
+ let (max_insertion, min_run) = if size_of::<T>() <= 16 {
+ (64, 32)
+ } else {
+ (32, 16)
+ };
- // shift everything to the right, to make space to
- // insert this value.
+ let len = v.len();
- // j + 1 could be `len` (for the last `i`), but in
- // that case, `i == j` so we don't copy. The
- // `.offset(j)` is always in bounds.
- ptr::copy(&*buf_dat.offset(j), buf_dat.offset(j + 1), i - j as usize);
- ptr::copy_nonoverlapping(read_ptr, buf_dat.offset(j), 1);
+ // Short arrays get sorted in-place via insertion sort to avoid allocations.
+ if len <= max_insertion {
+ if len >= 2 {
+ for i in (0..len-1).rev() {
+ insert_head(&mut v[i..], &mut compare);
}
}
+ return;
}
- // step 2. merge the sorted runs.
- let mut width = insertion;
- while width < len {
- // merge the sorted runs of length `width` in `buf_dat` two at
- // a time, placing the result in `buf_tmp`.
-
- // 0 <= start <= len.
- for start in (0..len).step_by(2 * width) {
- // manipulate pointers directly for speed (rather than
- // using a `for` loop with `range` and `.offset` inside
- // that loop).
- unsafe {
- // the end of the first run & start of the
- // second. Offset of `len` is defined, since this is
- // precisely one byte past the end of the object.
- let right_start = buf_dat.offset(cmp::min(start + width, len) as isize);
- // end of the second. Similar reasoning to the above re safety.
- let right_end_idx = cmp::min(start + 2 * width, len);
- let right_end = buf_dat.offset(right_end_idx as isize);
-
- // the pointers to the elements under consideration
- // from the two runs.
-
- // both of these are in bounds.
- let mut left = buf_dat.offset(start as isize);
- let mut right = right_start;
-
- // where we're putting the results, it is a run of
- // length `2*width`, so we step it once for each step
- // of either `left` or `right`. `buf_tmp` has length
- // `len`, so these are in bounds.
- let mut out = buf_tmp.offset(start as isize);
- let out_end = buf_tmp.offset(right_end_idx as isize);
-
- // If left[last] <= right[0], they are already in order:
- // fast-forward the left side (the right side is handled
- // in the loop).
- // If `right` is not empty then left is not empty, and
- // the offsets are in bounds.
- if right != right_end && compare(&*right.offset(-1), &*right) != Greater {
- let elems = (right_start as usize - left as usize) / mem::size_of::<T>();
- ptr::copy_nonoverlapping(&*left, out, elems);
- out = out.offset(elems as isize);
- left = right_start;
+ // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
+ // shallow copies of the contents of `v` without risking the dtors running on copies if
+ // `compare` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
+ // which will always have length at most `len / 2`.
+ let mut buf = Vec::with_capacity(len / 2);
+
+ // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
+ // strange decision, but consider the fact that merges more often go in the opposite direction
+ // (forwards). According to benchmarks, merging forwards is slightly faster than merging
+ // backwards. To conclude, identifying runs by traversing backwards improves performance.
+ let mut runs = vec![];
+ let mut end = len;
+ while end > 0 {
+ // Find the next natural run, and reverse it if it's strictly descending.
+ let mut start = end - 1;
+ if start > 0 {
+ start -= 1;
+ if compare(&v[start], &v[start + 1]) == Greater {
+ while start > 0 && compare(&v[start - 1], &v[start]) == Greater {
+ start -= 1;
}
-
- while out < out_end {
- // Either the left or the right run are exhausted,
- // so just copy the remainder from the other run
- // and move on; this gives a huge speed-up (order
- // of 25%) for mostly sorted vectors (the best
- // case).
- if left == right_start {
- // the number remaining in this run.
- let elems = (right_end as usize - right as usize) / mem::size_of::<T>();
- ptr::copy_nonoverlapping(&*right, out, elems);
- break;
- } else if right == right_end {
- let elems = (right_start as usize - left as usize) / mem::size_of::<T>();
- ptr::copy_nonoverlapping(&*left, out, elems);
- break;
- }
-
- // check which side is smaller, and that's the
- // next element for the new run.
-
- // `left < right_start` and `right < right_end`,
- // so these are valid.
- let to_copy = if compare(&*left, &*right) == Greater {
- step(&mut right)
- } else {
- step(&mut left)
- };
- ptr::copy_nonoverlapping(&*to_copy, out, 1);
- step(&mut out);
+ v[start..end].reverse();
+ } else {
+ while start > 0 && compare(&v[start - 1], &v[start]) != Greater {
+ start -= 1;
}
}
}
- mem::swap(&mut buf_dat, &mut buf_tmp);
+ // Insert some more elements into the run if it's too short. Insertion sort is faster than
+ // merge sort on short sequences, so this significantly improves performance.
+ while start > 0 && end - start < min_run {
+ start -= 1;
+ insert_head(&mut v[start..end], &mut compare);
+ }
- width *= 2;
+ // Push this run onto the stack.
+ runs.push(Run {
+ start: start,
+ len: end - start,
+ });
+ end = start;
+
+ // Merge some pairs of adjacent runs to satisfy the invariants.
+ while let Some(r) = collapse(&runs) {
+ let left = runs[r + 1];
+ let right = runs[r];
+ unsafe {
+ merge(&mut v[left.start .. right.start + right.len], left.len, buf.as_mut_ptr(),
+ &mut compare);
+ }
+ runs[r] = Run {
+ start: left.start,
+ len: left.len + right.len,
+ };
+ runs.remove(r + 1);
+ }
}
- // write the result to `v` in one go, so that there are never two copies
- // of the same object in `v`.
- unsafe {
- ptr::copy_nonoverlapping(&*buf_dat, v.as_mut_ptr(), len);
+ // Finally, exactly one run must remain in the stack.
+ debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
+
+ // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
+ // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
+ // algorithm should continue building a new run instead, `None` is returned.
+ //
+    // TimSort is infamous for its buggy implementations, as described here:
+ // http://envisage-project.eu/timsort-specification-and-verification/
+ //
+ // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
+ // Enforcing them on just top three is not sufficient to ensure that the invariants will still
+ // hold for *all* runs in the stack.
+ //
+ // This function correctly checks invariants for the top four runs. Additionally, if the top
+ // run starts at index 0, it will always demand a merge operation until the stack is fully
+ // collapsed, in order to complete the sort.
+ #[inline]
+ fn collapse(runs: &[Run]) -> Option<usize> {
+ let n = runs.len();
+ if n >= 2 && (runs[n - 1].start == 0 ||
+ runs[n - 2].len <= runs[n - 1].len ||
+ (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len) ||
+ (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len)) {
+ if n >= 3 && runs[n - 3].len < runs[n - 1].len {
+ Some(n - 3)
+ } else {
+ Some(n - 2)
+ }
+ } else {
+ None
+ }
}
- // increment the pointer, returning the old pointer.
- #[inline(always)]
- unsafe fn step<T>(ptr: &mut *mut T) -> *mut T {
- let old = *ptr;
- *ptr = ptr.offset(1);
- old
+ #[derive(Clone, Copy)]
+ struct Run {
+ start: usize,
+ len: usize,
}
}
/// ```
#[stable(feature = "vec_extend_from_slice", since = "1.6.0")]
pub fn extend_from_slice(&mut self, other: &[T]) {
- self.extend(other.iter().cloned())
+ self.spec_extend(other.iter())
}
}
impl<T> FromIterator<T> for Vec<T> {
#[inline]
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Vec<T> {
- <Self as SpecExtend<_>>::from_iter(iter.into_iter())
+ <Self as SpecExtend<_, _>>::from_iter(iter.into_iter())
}
}
}
// Specialization trait used for Vec::from_iter and Vec::extend
-trait SpecExtend<I> {
+trait SpecExtend<T, I> {
fn from_iter(iter: I) -> Self;
fn spec_extend(&mut self, iter: I);
}
-impl<I, T> SpecExtend<I> for Vec<T>
+impl<T, I> SpecExtend<T, I> for Vec<T>
where I: Iterator<Item=T>,
{
default fn from_iter(mut iterator: I) -> Self {
}
}
-impl<I, T> SpecExtend<I> for Vec<T>
+impl<T, I> SpecExtend<T, I> for Vec<T>
where I: TrustedLen<Item=T>,
{
fn from_iter(iterator: I) -> Self {
}
}
+// Fallback for iterators of references: clone each element and defer to the
+// by-value `SpecExtend` specializations.
+impl<'a, T: 'a, I> SpecExtend<&'a T, I> for Vec<T>
+ where I: Iterator<Item=&'a T>,
+ T: Clone,
+{
+ default fn from_iter(iterator: I) -> Self {
+ SpecExtend::from_iter(iterator.cloned())
+ }
+
+ default fn spec_extend(&mut self, iterator: I) {
+ self.spec_extend(iterator.cloned())
+ }
+}
+
+// Fast path: extending from a slice iterator of `Copy` elements is a single
+// reserve + bulk copy, with no per-element clone calls.
+impl<'a, T: 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T>
+ where T: Copy,
+{
+ fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
+ let slice = iterator.as_slice();
+ self.reserve(slice.len());
+ unsafe {
+ // SAFETY: `reserve` above guarantees capacity for `slice.len()` more
+ // elements, and `copy_from_slice` fully initializes exactly the tail
+ // exposed by `set_len`. `T: Copy` makes the bitwise copy valid.
+ let len = self.len();
+ self.set_len(len + slice.len());
+ self.get_unchecked_mut(len..).copy_from_slice(slice);
+ }
+ }
+}
+
impl<T> Vec<T> {
fn extend_desugared<I: Iterator<Item = T>>(&mut self, mut iterator: I) {
// This is the case for a general iterator.
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: 'a + Copy> Extend<&'a T> for Vec<T> {
fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
- self.extend(iter.into_iter().map(|&x| x))
+ // Dispatch through specialization instead of mapping references by hand,
+ // so slice iterators can hit the bulk-copy path.
+ self.spec_extend(iter.into_iter())
}
}
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn is_empty(&self) -> bool {
- self.len() == 0
+ // Compare the ring-buffer cursors directly instead of computing a length.
+ self.tail == self.head
}
/// Create a draining iterator that removes the specified range in the
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, T> ExactSizeIterator for Iter<'a, T> {}
+impl<'a, T> ExactSizeIterator for Iter<'a, T> {
+ // NOTE(review): assumes `head`/`tail` are this iterator's own cursors and are
+ // equal exactly when it is exhausted — confirm against the `Iter` definition.
+ fn is_empty(&self) -> bool {
+ self.head == self.tail
+ }
+}
#[unstable(feature = "fused", issue = "35602")]
impl<'a, T> FusedIterator for Iter<'a, T> {}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, T> ExactSizeIterator for IterMut<'a, T> {}
+impl<'a, T> ExactSizeIterator for IterMut<'a, T> {
+ // Mirrors the `Iter` implementation: equal cursors mean no elements remain.
+ fn is_empty(&self) -> bool {
+ self.head == self.tail
+ }
+}
#[unstable(feature = "fused", issue = "35602")]
impl<'a, T> FusedIterator for IterMut<'a, T> {}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> ExactSizeIterator for IntoIter<T> {}
+impl<T> ExactSizeIterator for IntoIter<T> {
+ // Delegate so the inner collection's O(1) emptiness check is preserved.
+ fn is_empty(&self) -> bool {
+ self.inner.is_empty()
+ }
+}
#[unstable(feature = "fused", issue = "35602")]
impl<T> FusedIterator for IntoIter<T> {}
#[test]
fn test_sort() {
- for len in 4..25 {
+ for len in (2..25).chain(500..510) {
for _ in 0..100 {
let mut v: Vec<_> = thread_rng().gen_iter::<i32>().take(len).collect();
let mut v1 = v.clone();
#[test]
fn test_sort_stability() {
- for len in 4..25 {
+ for len in (2..25).chain(500..510) {
for _ in 0..10 {
let mut counts = [0; 10];
}
}
+#[test]
+fn test_sort_zero_sized_type() {
+ // Should not panic.
+ // Zero-sized elements exercise the sort's pointer arithmetic with stride 0.
+ [(); 10].sort();
+ [(); 100].sort();
+}
+
#[test]
fn test_concat() {
let v: [Vec<i32>; 0] = [];
})
}
- #[bench]
- fn sort_random_small(b: &mut Bencher) {
- let mut rng = thread_rng();
- b.iter(|| {
- let mut v: Vec<_> = rng.gen_iter::<u64>().take(5).collect();
- v.sort();
- });
- b.bytes = 5 * mem::size_of::<u64>() as u64;
+ // Already-sorted input: best case for an adaptive, run-detecting sort.
+ fn gen_ascending(len: usize) -> Vec<u64> {
+ (0..len as u64).collect()
}
- #[bench]
- fn sort_random_medium(b: &mut Bencher) {
- let mut rng = thread_rng();
- b.iter(|| {
- let mut v: Vec<_> = rng.gen_iter::<u64>().take(100).collect();
- v.sort();
- });
- b.bytes = 100 * mem::size_of::<u64>() as u64;
+ // Strictly descending input: a single reversed run for the sort to detect.
+ fn gen_descending(len: usize) -> Vec<u64> {
+ (0..len as u64).rev().collect()
}
- #[bench]
- fn sort_random_large(b: &mut Bencher) {
+ // Uniformly random input of `len` u64s.
+ fn gen_random(len: usize) -> Vec<u64> {
let mut rng = thread_rng();
- b.iter(|| {
- let mut v: Vec<_> = rng.gen_iter::<u64>().take(10000).collect();
- v.sort();
- });
- b.bytes = 10000 * mem::size_of::<u64>() as u64;
+ rng.gen_iter::<u64>().take(len).collect()
}
- #[bench]
- fn sort_sorted(b: &mut Bencher) {
- let mut v: Vec<_> = (0..10000).collect();
- b.iter(|| {
- v.sort();
- });
- b.bytes = (v.len() * mem::size_of_val(&v[0])) as u64;
+ fn gen_mostly_ascending(len: usize) -> Vec<u64> {
+ let mut rng = thread_rng();
+ let mut v = gen_ascending(len);
+ // Swap roughly sqrt(len) random pairs so the input is only "mostly" sorted.
+ // NOTE(review): `% len` panics when len == 0 — all current callers pass len > 0.
+ for _ in (0usize..).take_while(|x| x * x <= len) {
+ let x = rng.gen::<usize>() % len;
+ let y = rng.gen::<usize>() % len;
+ v.swap(x, y);
+ }
+ v
}
- type BigSortable = (u64, u64, u64, u64);
-
- #[bench]
- fn sort_big_random_small(b: &mut Bencher) {
+ fn gen_mostly_descending(len: usize) -> Vec<u64> {
let mut rng = thread_rng();
- b.iter(|| {
- let mut v = rng.gen_iter::<BigSortable>()
- .take(5)
- .collect::<Vec<BigSortable>>();
- v.sort();
- });
- b.bytes = 5 * mem::size_of::<BigSortable>() as u64;
+ let mut v = gen_descending(len);
+ // Swap roughly sqrt(len) random pairs, same perturbation as gen_mostly_ascending.
+ for _ in (0usize..).take_while(|x| x * x <= len) {
+ let x = rng.gen::<usize>() % len;
+ let y = rng.gen::<usize>() % len;
+ v.swap(x, y);
+ }
+ v
}
- #[bench]
- fn sort_big_random_medium(b: &mut Bencher) {
+ // Random input with 128-byte elements: stresses element moves rather than comparisons.
+ fn gen_big_random(len: usize) -> Vec<[u64; 16]> {
let mut rng = thread_rng();
- b.iter(|| {
- let mut v = rng.gen_iter::<BigSortable>()
- .take(100)
- .collect::<Vec<BigSortable>>();
- v.sort();
- });
- b.bytes = 100 * mem::size_of::<BigSortable>() as u64;
+ rng.gen_iter().map(|x| [x; 16]).take(len).collect()
}
- #[bench]
- fn sort_big_random_large(b: &mut Bencher) {
- let mut rng = thread_rng();
- b.iter(|| {
- let mut v = rng.gen_iter::<BigSortable>()
- .take(10000)
- .collect::<Vec<BigSortable>>();
- v.sort();
- });
- b.bytes = 10000 * mem::size_of::<BigSortable>() as u64;
+ fn gen_big_ascending(len: usize) -> Vec<[u64; 16]> {
+ // NOTE(review): the range already yields exactly `len` items, so `.take(len)`
+ // is redundant here (harmless, but worth dropping for consistency).
+ (0..len as u64).map(|x| [x; 16]).take(len).collect()
}
+ fn gen_big_descending(len: usize) -> Vec<[u64; 16]> {
+ // NOTE(review): `.take(len)` is redundant here as well — the reversed range
+ // is already `len` items long.
+ (0..len as u64).rev().map(|x| [x; 16]).take(len).collect()
+ }
+
+ // Stamps out a #[bench] that sorts a fresh `$gen($len)` input each iteration.
+ macro_rules! sort_bench {
+ ($name:ident, $gen:expr, $len:expr) => {
+ #[bench]
+ fn $name(b: &mut Bencher) {
+ b.iter(|| $gen($len).sort());
+ // Throughput in bytes/sec; element size probed from a one-element sample.
+ b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64;
+ }
+ }
+ }
+
+ sort_bench!(sort_small_random, gen_random, 10);
+ sort_bench!(sort_small_ascending, gen_ascending, 10);
+ sort_bench!(sort_small_descending, gen_descending, 10);
+
+ sort_bench!(sort_small_big_random, gen_big_random, 10);
+ sort_bench!(sort_small_big_ascending, gen_big_ascending, 10);
+ sort_bench!(sort_small_big_descending, gen_big_descending, 10);
+
+ sort_bench!(sort_medium_random, gen_random, 100);
+ sort_bench!(sort_medium_ascending, gen_ascending, 100);
+ sort_bench!(sort_medium_descending, gen_descending, 100);
+
+ sort_bench!(sort_large_random, gen_random, 10000);
+ sort_bench!(sort_large_ascending, gen_ascending, 10000);
+ sort_bench!(sort_large_descending, gen_descending, 10000);
+ sort_bench!(sort_large_mostly_ascending, gen_mostly_ascending, 10000);
+ sort_bench!(sort_large_mostly_descending, gen_mostly_descending, 10000);
+
+ sort_bench!(sort_large_big_random, gen_big_random, 10000);
+ sort_bench!(sort_large_big_ascending, gen_big_ascending, 10000);
+ sort_bench!(sort_large_big_descending, gen_big_descending, 10000);
+
#[bench]
- fn sort_big_sorted(b: &mut Bencher) {
- let mut v: Vec<BigSortable> = (0..10000).map(|i| (i, i, i, i)).collect();
+ // Benchmarks sorting with a deliberately expensive comparison function.
+ fn sort_large_random_expensive(b: &mut Bencher) {
+ let len = 10000;
b.iter(|| {
- v.sort();
+ let mut count = 0;
+ // NOTE(review): `count` is `Copy`, so the `move` closure captures its own
+ // copy; the outer `black_box(count)` below always observes 0. Confirm this
+ // is intended (the counter's increment/panic only exist to keep the
+ // comparison from being optimized away).
+ let cmp = move |a: &u64, b: &u64| {
+ count += 1;
+ if count % 1_000_000_000 == 0 {
+ panic!("should not happen");
+ }
+ (*a as f64).cos().partial_cmp(&(*b as f64).cos()).unwrap()
+ };
+
+ let mut v = gen_random(len);
+ v.sort_by(cmp);
+
+ black_box(count);
});
- b.bytes = (v.len() * mem::size_of_val(&v[0])) as u64;
+ b.bytes = len as u64 * mem::size_of::<u64>() as u64;
}
}
d
}
}
+
+#[test]
+fn test_is_empty() {
+ let mut v = VecDeque::<i32>::new();
+ assert!(v.is_empty());
+ assert!(v.iter().is_empty());
+ assert!(v.iter_mut().is_empty());
+ v.extend(&[2, 3, 4]);
+ assert!(!v.is_empty());
+ assert!(!v.iter().is_empty());
+ assert!(!v.iter_mut().is_empty());
+ // Drain one element at a time, checking `is_empty` stays consistent with `len`
+ // for the deque and both borrowing iterators at every intermediate size.
+ while let Some(_) = v.pop_front() {
+ assert_eq!(v.is_empty(), v.len() == 0);
+ assert_eq!(v.iter().is_empty(), v.iter().len() == 0);
+ assert_eq!(v.iter_mut().is_empty(), v.iter_mut().len() == 0);
+ }
+ assert!(v.is_empty());
+ assert!(v.iter().is_empty());
+ assert!(v.iter_mut().is_empty());
+ assert!(v.into_iter().is_empty());
+}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, I: ExactSizeIterator + ?Sized> ExactSizeIterator for &'a mut I {}
+impl<'a, I: ExactSizeIterator + ?Sized> ExactSizeIterator for &'a mut I {
+ // Forward both methods so a specialized `len`/`is_empty` on the underlying
+ // iterator is not hidden behind the `&mut` wrapper's defaults.
+ fn len(&self) -> usize {
+ (**self).len()
+ }
+ fn is_empty(&self) -> bool {
+ (**self).is_empty()
+ }
+}
/// Trait to represent types that can be created by summing up an iterator.
///
fn len(&self) -> usize {
self.0.len()
}
+
+ // Delegate to the wrapped iterator's (possibly specialized) emptiness check.
+ #[inline]
+ fn is_empty(&self) -> bool {
+ self.0.is_empty()
+ }
}
#[unstable(feature = "fused", issue = "35602")]
}
}
-impl<'a> Visitor for CheckAttrVisitor<'a> {
- fn visit_item(&mut self, item: &ast::Item) {
+impl<'a> Visitor<'a> for CheckAttrVisitor<'a> {
+ fn visit_item(&mut self, item: &'a ast::Item) {
let target = Target::from_item(item);
for attr in &item.attrs {
self.check_attribute(attr, target);
lctx: &'lcx mut LoweringContext<'interner>,
}
- impl<'lcx, 'interner> Visitor for ItemLowerer<'lcx, 'interner> {
- fn visit_item(&mut self, item: &Item) {
+ impl<'lcx, 'interner> Visitor<'lcx> for ItemLowerer<'lcx, 'interner> {
+ fn visit_item(&mut self, item: &'lcx Item) {
let hir_item = self.lctx.lower_item(item);
self.lctx.items.insert(item.id, hir_item);
visit::walk_item(self, item);
}
- fn visit_impl_item(&mut self, item: &ImplItem) {
+ fn visit_impl_item(&mut self, item: &'lcx ImplItem) {
let id = self.lctx.lower_impl_item_ref(item).id;
let hir_item = self.lctx.lower_impl_item(item);
self.lctx.impl_items.insert(id, hir_item);
}
}
-impl<'a> visit::Visitor for DefCollector<'a> {
- fn visit_item(&mut self, i: &Item) {
+impl<'a> visit::Visitor<'a> for DefCollector<'a> {
+ fn visit_item(&mut self, i: &'a Item) {
debug!("visit_item: {:?}", i);
// Pick the def data. This need not be unique, but the more
});
}
- fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) {
+ fn visit_foreign_item(&mut self, foreign_item: &'a ForeignItem) {
let def = self.create_def(foreign_item.id,
DefPathData::ValueNs(foreign_item.ident.name.as_str()));
});
}
- fn visit_generics(&mut self, generics: &Generics) {
+ fn visit_generics(&mut self, generics: &'a Generics) {
for ty_param in generics.ty_params.iter() {
self.create_def(ty_param.id, DefPathData::TypeParam(ty_param.ident.name.as_str()));
}
visit::walk_generics(self, generics);
}
- fn visit_trait_item(&mut self, ti: &TraitItem) {
+ fn visit_trait_item(&mut self, ti: &'a TraitItem) {
let def_data = match ti.node {
TraitItemKind::Method(..) | TraitItemKind::Const(..) =>
DefPathData::ValueNs(ti.ident.name.as_str()),
});
}
- fn visit_impl_item(&mut self, ii: &ImplItem) {
+ fn visit_impl_item(&mut self, ii: &'a ImplItem) {
let def_data = match ii.node {
ImplItemKind::Method(..) | ImplItemKind::Const(..) =>
DefPathData::ValueNs(ii.ident.name.as_str()),
});
}
- fn visit_pat(&mut self, pat: &Pat) {
+ fn visit_pat(&mut self, pat: &'a Pat) {
let parent_def = self.parent_def;
match pat.node {
self.parent_def = parent_def;
}
- fn visit_expr(&mut self, expr: &Expr) {
+ fn visit_expr(&mut self, expr: &'a Expr) {
let parent_def = self.parent_def;
match expr.node {
self.parent_def = parent_def;
}
- fn visit_ty(&mut self, ty: &Ty) {
+ fn visit_ty(&mut self, ty: &'a Ty) {
match ty.node {
TyKind::Mac(..) => return self.visit_macro_invoc(ty.id, false),
TyKind::Array(_, ref length) => self.visit_ast_const_integer(length),
visit::walk_ty(self, ty);
}
- fn visit_lifetime_def(&mut self, def: &LifetimeDef) {
+ fn visit_lifetime_def(&mut self, def: &'a LifetimeDef) {
self.create_def(def.lifetime.id, DefPathData::LifetimeDef(def.lifetime.name.as_str()));
}
- fn visit_macro_def(&mut self, macro_def: &MacroDef) {
+ fn visit_macro_def(&mut self, macro_def: &'a MacroDef) {
self.create_def(macro_def.id, DefPathData::MacroDef(macro_def.ident.name.as_str()));
}
- fn visit_stmt(&mut self, stmt: &Stmt) {
+ fn visit_stmt(&mut self, stmt: &'a Stmt) {
match stmt.node {
StmtKind::Mac(..) => self.visit_macro_invoc(stmt.id, false),
_ => visit::walk_stmt(self, stmt),
}
}
-impl LateLintPass for HardwiredLints {}
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for HardwiredLints {}
err
}
-pub trait LintContext: Sized {
+pub trait LintContext<'tcx>: Sized {
fn sess(&self) -> &Session;
fn lints(&self) -> &LintStore;
fn mut_lints(&mut self) -> &mut LintStore;
fn level_stack(&mut self) -> &mut Vec<(LintId, LevelSource)>;
- fn enter_attrs(&mut self, attrs: &[ast::Attribute]);
- fn exit_attrs(&mut self, attrs: &[ast::Attribute]);
+ fn enter_attrs(&mut self, attrs: &'tcx [ast::Attribute]);
+ fn exit_attrs(&mut self, attrs: &'tcx [ast::Attribute]);
/// Get the level of `lint` at the current position of the lint
/// traversal.
/// current lint context, call the provided function, then reset the
/// lints in effect to their previous state.
fn with_lint_attrs<F>(&mut self,
- attrs: &[ast::Attribute],
+ attrs: &'tcx [ast::Attribute],
f: F)
where F: FnOnce(&mut Self),
{
}
}
-impl<'a, 'tcx> LintContext for LateContext<'a, 'tcx> {
+impl<'a, 'tcx> LintContext<'tcx> for LateContext<'a, 'tcx> {
/// Get the overall compiler `Session` object.
fn sess(&self) -> &Session {
&self.tcx.sess
&mut self.level_stack
}
- fn enter_attrs(&mut self, attrs: &[ast::Attribute]) {
+ fn enter_attrs(&mut self, attrs: &'tcx [ast::Attribute]) {
debug!("late context: enter_attrs({:?})", attrs);
run_lints!(self, enter_lint_attrs, late_passes, attrs);
}
- fn exit_attrs(&mut self, attrs: &[ast::Attribute]) {
+ fn exit_attrs(&mut self, attrs: &'tcx [ast::Attribute]) {
debug!("late context: exit_attrs({:?})", attrs);
run_lints!(self, exit_lint_attrs, late_passes, attrs);
}
}
-impl<'a> LintContext for EarlyContext<'a> {
+impl<'a> LintContext<'a> for EarlyContext<'a> {
/// Get the overall compiler `Session` object.
fn sess(&self) -> &Session {
&self.sess
&mut self.level_stack
}
- fn enter_attrs(&mut self, attrs: &[ast::Attribute]) {
+ fn enter_attrs(&mut self, attrs: &'a [ast::Attribute]) {
debug!("early context: enter_attrs({:?})", attrs);
run_lints!(self, enter_lint_attrs, early_passes, attrs);
}
- fn exit_attrs(&mut self, attrs: &[ast::Attribute]) {
+ fn exit_attrs(&mut self, attrs: &'a [ast::Attribute]) {
debug!("early context: exit_attrs({:?})", attrs);
run_lints!(self, exit_lint_attrs, early_passes, attrs);
}
hir_visit::walk_path(self, p);
}
- fn visit_attribute(&mut self, attr: &ast::Attribute) {
+ fn visit_attribute(&mut self, attr: &'tcx ast::Attribute) {
check_lint_name_attribute(self, attr);
run_lints!(self, check_attribute, late_passes, attr);
}
}
-impl<'a> ast_visit::Visitor for EarlyContext<'a> {
- fn visit_item(&mut self, it: &ast::Item) {
+impl<'a> ast_visit::Visitor<'a> for EarlyContext<'a> {
+ fn visit_item(&mut self, it: &'a ast::Item) {
self.with_lint_attrs(&it.attrs, |cx| {
run_lints!(cx, check_item, early_passes, it);
ast_visit::walk_item(cx, it);
})
}
- fn visit_foreign_item(&mut self, it: &ast::ForeignItem) {
+ fn visit_foreign_item(&mut self, it: &'a ast::ForeignItem) {
self.with_lint_attrs(&it.attrs, |cx| {
run_lints!(cx, check_foreign_item, early_passes, it);
ast_visit::walk_foreign_item(cx, it);
})
}
- fn visit_pat(&mut self, p: &ast::Pat) {
+ fn visit_pat(&mut self, p: &'a ast::Pat) {
run_lints!(self, check_pat, early_passes, p);
ast_visit::walk_pat(self, p);
}
- fn visit_expr(&mut self, e: &ast::Expr) {
+ fn visit_expr(&mut self, e: &'a ast::Expr) {
self.with_lint_attrs(&e.attrs, |cx| {
run_lints!(cx, check_expr, early_passes, e);
ast_visit::walk_expr(cx, e);
})
}
- fn visit_stmt(&mut self, s: &ast::Stmt) {
+ fn visit_stmt(&mut self, s: &'a ast::Stmt) {
run_lints!(self, check_stmt, early_passes, s);
ast_visit::walk_stmt(self, s);
}
- fn visit_fn(&mut self, fk: ast_visit::FnKind, decl: &ast::FnDecl,
+ fn visit_fn(&mut self, fk: ast_visit::FnKind<'a>, decl: &'a ast::FnDecl,
span: Span, id: ast::NodeId) {
run_lints!(self, check_fn, early_passes, fk, decl, span, id);
ast_visit::walk_fn(self, fk, decl, span);
}
fn visit_variant_data(&mut self,
- s: &ast::VariantData,
+ s: &'a ast::VariantData,
ident: ast::Ident,
- g: &ast::Generics,
+ g: &'a ast::Generics,
item_id: ast::NodeId,
_: Span) {
run_lints!(self, check_struct_def, early_passes, s, ident, g, item_id);
run_lints!(self, check_struct_def_post, early_passes, s, ident, g, item_id);
}
- fn visit_struct_field(&mut self, s: &ast::StructField) {
+ fn visit_struct_field(&mut self, s: &'a ast::StructField) {
self.with_lint_attrs(&s.attrs, |cx| {
run_lints!(cx, check_struct_field, early_passes, s);
ast_visit::walk_struct_field(cx, s);
})
}
- fn visit_variant(&mut self, v: &ast::Variant, g: &ast::Generics, item_id: ast::NodeId) {
+ fn visit_variant(&mut self, v: &'a ast::Variant, g: &'a ast::Generics, item_id: ast::NodeId) {
self.with_lint_attrs(&v.node.attrs, |cx| {
run_lints!(cx, check_variant, early_passes, v, g);
ast_visit::walk_variant(cx, v, g, item_id);
})
}
- fn visit_ty(&mut self, t: &ast::Ty) {
+ fn visit_ty(&mut self, t: &'a ast::Ty) {
run_lints!(self, check_ty, early_passes, t);
ast_visit::walk_ty(self, t);
}
run_lints!(self, check_ident, early_passes, sp, id);
}
- fn visit_mod(&mut self, m: &ast::Mod, s: Span, n: ast::NodeId) {
+ fn visit_mod(&mut self, m: &'a ast::Mod, s: Span, n: ast::NodeId) {
run_lints!(self, check_mod, early_passes, m, s, n);
ast_visit::walk_mod(self, m);
run_lints!(self, check_mod_post, early_passes, m, s, n);
}
- fn visit_local(&mut self, l: &ast::Local) {
+ fn visit_local(&mut self, l: &'a ast::Local) {
self.with_lint_attrs(&l.attrs, |cx| {
run_lints!(cx, check_local, early_passes, l);
ast_visit::walk_local(cx, l);
})
}
- fn visit_block(&mut self, b: &ast::Block) {
+ fn visit_block(&mut self, b: &'a ast::Block) {
run_lints!(self, check_block, early_passes, b);
ast_visit::walk_block(self, b);
run_lints!(self, check_block_post, early_passes, b);
}
- fn visit_arm(&mut self, a: &ast::Arm) {
+ fn visit_arm(&mut self, a: &'a ast::Arm) {
run_lints!(self, check_arm, early_passes, a);
ast_visit::walk_arm(self, a);
}
- fn visit_expr_post(&mut self, e: &ast::Expr) {
+ fn visit_expr_post(&mut self, e: &'a ast::Expr) {
run_lints!(self, check_expr_post, early_passes, e);
}
- fn visit_generics(&mut self, g: &ast::Generics) {
+ fn visit_generics(&mut self, g: &'a ast::Generics) {
run_lints!(self, check_generics, early_passes, g);
ast_visit::walk_generics(self, g);
}
- fn visit_trait_item(&mut self, trait_item: &ast::TraitItem) {
+ fn visit_trait_item(&mut self, trait_item: &'a ast::TraitItem) {
self.with_lint_attrs(&trait_item.attrs, |cx| {
run_lints!(cx, check_trait_item, early_passes, trait_item);
ast_visit::walk_trait_item(cx, trait_item);
});
}
- fn visit_impl_item(&mut self, impl_item: &ast::ImplItem) {
+ fn visit_impl_item(&mut self, impl_item: &'a ast::ImplItem) {
self.with_lint_attrs(&impl_item.attrs, |cx| {
run_lints!(cx, check_impl_item, early_passes, impl_item);
ast_visit::walk_impl_item(cx, impl_item);
});
}
- fn visit_lifetime(&mut self, lt: &ast::Lifetime) {
+ fn visit_lifetime(&mut self, lt: &'a ast::Lifetime) {
run_lints!(self, check_lifetime, early_passes, lt);
}
- fn visit_lifetime_def(&mut self, lt: &ast::LifetimeDef) {
+ fn visit_lifetime_def(&mut self, lt: &'a ast::LifetimeDef) {
run_lints!(self, check_lifetime_def, early_passes, lt);
}
- fn visit_path(&mut self, p: &ast::Path, id: ast::NodeId) {
+ fn visit_path(&mut self, p: &'a ast::Path, id: ast::NodeId) {
run_lints!(self, check_path, early_passes, p, id);
ast_visit::walk_path(self, p);
}
- fn visit_path_list_item(&mut self, prefix: &ast::Path, item: &ast::PathListItem) {
+ fn visit_path_list_item(&mut self, prefix: &'a ast::Path, item: &'a ast::PathListItem) {
run_lints!(self, check_path_list_item, early_passes, item);
ast_visit::walk_path_list_item(self, prefix, item);
}
- fn visit_attribute(&mut self, attr: &ast::Attribute) {
+ fn visit_attribute(&mut self, attr: &'a ast::Attribute) {
run_lints!(self, check_attribute, early_passes, attr);
}
}
//
// FIXME: eliminate the duplication with `Visitor`. But this also
// contains a few lint-specific methods with no equivalent in `Visitor`.
-pub trait LateLintPass: LintPass {
+pub trait LateLintPass<'a, 'tcx>: LintPass {
fn check_name(&mut self, _: &LateContext, _: Span, _: ast::Name) { }
- fn check_crate(&mut self, _: &LateContext, _: &hir::Crate) { }
- fn check_crate_post(&mut self, _: &LateContext, _: &hir::Crate) { }
- fn check_mod(&mut self, _: &LateContext, _: &hir::Mod, _: Span, _: ast::NodeId) { }
- fn check_mod_post(&mut self, _: &LateContext, _: &hir::Mod, _: Span, _: ast::NodeId) { }
- fn check_foreign_item(&mut self, _: &LateContext, _: &hir::ForeignItem) { }
- fn check_foreign_item_post(&mut self, _: &LateContext, _: &hir::ForeignItem) { }
- fn check_item(&mut self, _: &LateContext, _: &hir::Item) { }
- fn check_item_post(&mut self, _: &LateContext, _: &hir::Item) { }
- fn check_local(&mut self, _: &LateContext, _: &hir::Local) { }
- fn check_block(&mut self, _: &LateContext, _: &hir::Block) { }
- fn check_block_post(&mut self, _: &LateContext, _: &hir::Block) { }
- fn check_stmt(&mut self, _: &LateContext, _: &hir::Stmt) { }
- fn check_arm(&mut self, _: &LateContext, _: &hir::Arm) { }
- fn check_pat(&mut self, _: &LateContext, _: &hir::Pat) { }
- fn check_decl(&mut self, _: &LateContext, _: &hir::Decl) { }
- fn check_expr(&mut self, _: &LateContext, _: &hir::Expr) { }
- fn check_expr_post(&mut self, _: &LateContext, _: &hir::Expr) { }
- fn check_ty(&mut self, _: &LateContext, _: &hir::Ty) { }
- fn check_generics(&mut self, _: &LateContext, _: &hir::Generics) { }
- fn check_fn(&mut self, _: &LateContext,
- _: FnKind, _: &hir::FnDecl, _: &hir::Expr, _: Span, _: ast::NodeId) { }
- fn check_fn_post(&mut self, _: &LateContext,
- _: FnKind, _: &hir::FnDecl, _: &hir::Expr, _: Span, _: ast::NodeId) { }
- fn check_trait_item(&mut self, _: &LateContext, _: &hir::TraitItem) { }
- fn check_trait_item_post(&mut self, _: &LateContext, _: &hir::TraitItem) { }
- fn check_impl_item(&mut self, _: &LateContext, _: &hir::ImplItem) { }
- fn check_impl_item_post(&mut self, _: &LateContext, _: &hir::ImplItem) { }
- fn check_struct_def(&mut self, _: &LateContext,
- _: &hir::VariantData, _: ast::Name, _: &hir::Generics, _: ast::NodeId) { }
- fn check_struct_def_post(&mut self, _: &LateContext,
- _: &hir::VariantData, _: ast::Name, _: &hir::Generics, _: ast::NodeId) { }
- fn check_struct_field(&mut self, _: &LateContext, _: &hir::StructField) { }
- fn check_variant(&mut self, _: &LateContext, _: &hir::Variant, _: &hir::Generics) { }
- fn check_variant_post(&mut self, _: &LateContext, _: &hir::Variant, _: &hir::Generics) { }
- fn check_lifetime(&mut self, _: &LateContext, _: &hir::Lifetime) { }
- fn check_lifetime_def(&mut self, _: &LateContext, _: &hir::LifetimeDef) { }
- fn check_path(&mut self, _: &LateContext, _: &hir::Path, _: ast::NodeId) { }
- fn check_attribute(&mut self, _: &LateContext, _: &ast::Attribute) { }
+ fn check_crate(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Crate) { }
+ fn check_crate_post(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Crate) { }
+ fn check_mod(&mut self,
+ _: &LateContext<'a, 'tcx>,
+ _: &'tcx hir::Mod,
+ _: Span,
+ _: ast::NodeId) { }
+ fn check_mod_post(&mut self,
+ _: &LateContext<'a, 'tcx>,
+ _: &'tcx hir::Mod,
+ _: Span,
+ _: ast::NodeId) { }
+ fn check_foreign_item(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::ForeignItem) { }
+ fn check_foreign_item_post(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::ForeignItem) { }
+ fn check_item(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Item) { }
+ fn check_item_post(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Item) { }
+ fn check_local(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Local) { }
+ fn check_block(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Block) { }
+ fn check_block_post(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Block) { }
+ fn check_stmt(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Stmt) { }
+ fn check_arm(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Arm) { }
+ fn check_pat(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Pat) { }
+ fn check_decl(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Decl) { }
+ fn check_expr(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Expr) { }
+ fn check_expr_post(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Expr) { }
+ fn check_ty(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Ty) { }
+ fn check_generics(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Generics) { }
+ fn check_fn(&mut self,
+ _: &LateContext<'a, 'tcx>,
+ _: FnKind<'tcx>,
+ _: &'tcx hir::FnDecl,
+ _: &'tcx hir::Expr,
+ _: Span,
+ _: ast::NodeId) { }
+ fn check_fn_post(&mut self,
+ _: &LateContext<'a, 'tcx>,
+ _: FnKind<'tcx>,
+ _: &'tcx hir::FnDecl,
+ _: &'tcx hir::Expr,
+ _: Span,
+ _: ast::NodeId) { }
+ fn check_trait_item(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::TraitItem) { }
+ fn check_trait_item_post(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::TraitItem) { }
+ fn check_impl_item(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::ImplItem) { }
+ fn check_impl_item_post(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::ImplItem) { }
+ fn check_struct_def(&mut self,
+ _: &LateContext<'a, 'tcx>,
+ _: &'tcx hir::VariantData,
+ _: ast::Name,
+ _: &'tcx hir::Generics,
+ _: ast::NodeId) { }
+ fn check_struct_def_post(&mut self,
+ _: &LateContext<'a, 'tcx>,
+ _: &'tcx hir::VariantData,
+ _: ast::Name,
+ _: &'tcx hir::Generics,
+ _: ast::NodeId) { }
+ fn check_struct_field(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::StructField) { }
+ fn check_variant(&mut self,
+ _: &LateContext<'a, 'tcx>,
+ _: &'tcx hir::Variant,
+ _: &'tcx hir::Generics) { }
+ fn check_variant_post(&mut self,
+ _: &LateContext<'a, 'tcx>,
+ _: &'tcx hir::Variant,
+ _: &'tcx hir::Generics) { }
+ fn check_lifetime(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Lifetime) { }
+ fn check_lifetime_def(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::LifetimeDef) { }
+ fn check_path(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Path, _: ast::NodeId) { }
+ fn check_attribute(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx ast::Attribute) { }
/// Called when entering a syntax node that can have lint attributes such
/// as `#[allow(...)]`. Called with *all* the attributes of that node.
- fn enter_lint_attrs(&mut self, _: &LateContext, _: &[ast::Attribute]) { }
+ fn enter_lint_attrs(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx [ast::Attribute]) { }
/// Counterpart to `enter_lint_attrs`.
- fn exit_lint_attrs(&mut self, _: &LateContext, _: &[ast::Attribute]) { }
+ fn exit_lint_attrs(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx [ast::Attribute]) { }
}
pub trait EarlyLintPass: LintPass {
/// A lint pass boxed up as a trait object.
pub type EarlyLintPassObject = Box<EarlyLintPass + 'static>;
-pub type LateLintPassObject = Box<LateLintPass + 'static>;
+pub type LateLintPassObject = Box<for<'a, 'tcx> LateLintPass<'a, 'tcx> + 'static>;
/// Identifies a lint known to the compiler.
#[derive(Clone, Copy, Debug)]
}
}
-impl LateLintPass for NonCamelCaseTypes {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for NonCamelCaseTypes {
fn check_item(&mut self, cx: &LateContext, it: &hir::Item) {
let extern_repr_count = it.attrs
.iter()
}
}
-impl LateLintPass for NonSnakeCase {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for NonSnakeCase {
fn check_crate(&mut self, cx: &LateContext, cr: &hir::Crate) {
let attr_crate_name = cr.attrs
.iter()
}
}
-impl LateLintPass for NonUpperCaseGlobals {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for NonUpperCaseGlobals {
fn check_item(&mut self, cx: &LateContext, it: &hir::Item) {
match it.node {
hir::ItemStatic(..) => {
}
}
-impl LateLintPass for WhileTrue {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for WhileTrue {
fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) {
if let hir::ExprWhile(ref cond, ..) = e.node {
if let hir::ExprLit(ref lit) = cond.node {
pub struct BoxPointers;
impl BoxPointers {
- fn check_heap_type<'a, 'tcx>(&self, cx: &LateContext<'a, 'tcx>, span: Span, ty: Ty<'tcx>) {
+ fn check_heap_type<'a, 'tcx>(&self, cx: &LateContext, span: Span, ty: Ty) {
for leaf_ty in ty.walk() {
if let ty::TyBox(_) = leaf_ty.sty {
let m = format!("type uses owned (Box type) pointers: {}", ty);
}
}
-impl LateLintPass for BoxPointers {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for BoxPointers {
fn check_item(&mut self, cx: &LateContext, it: &hir::Item) {
match it.node {
hir::ItemFn(..) |
}
}
-impl LateLintPass for NonShorthandFieldPatterns {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for NonShorthandFieldPatterns {
fn check_pat(&mut self, cx: &LateContext, pat: &hir::Pat) {
if let PatKind::Struct(_, ref field_pats, _) = pat.node {
for fieldpat in field_pats {
}
}
-impl LateLintPass for UnsafeCode {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnsafeCode {
fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) {
if let hir::ExprBlock(ref blk) = e.node {
// Don't warn about generated blocks, that'll just pollute the output.
fn check_fn(&mut self,
cx: &LateContext,
- fk: FnKind,
+ fk: FnKind<'tcx>,
_: &hir::FnDecl,
_: &hir::Expr,
span: Span,
}
}
-impl LateLintPass for MissingDoc {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingDoc {
fn enter_lint_attrs(&mut self, _: &LateContext, attrs: &[ast::Attribute]) {
let doc_hidden = self.doc_hidden() ||
attrs.iter().any(|attr| {
self.doc_hidden_stack.push(doc_hidden);
}
- fn exit_lint_attrs(&mut self, _: &LateContext, _: &[ast::Attribute]) {
+ fn exit_lint_attrs(&mut self, _: &LateContext, _attrs: &[ast::Attribute]) {
self.doc_hidden_stack.pop().expect("empty doc_hidden_stack");
}
}
}
-impl LateLintPass for MissingCopyImplementations {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingCopyImplementations {
fn check_item(&mut self, cx: &LateContext, item: &hir::Item) {
if !cx.access_levels.is_reachable(item.id) {
return;
}
}
-impl LateLintPass for MissingDebugImplementations {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingDebugImplementations {
fn check_item(&mut self, cx: &LateContext, item: &hir::Item) {
if !cx.access_levels.is_reachable(item.id) {
return;
}
}
-impl LateLintPass for UnconditionalRecursion {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnconditionalRecursion {
fn check_fn(&mut self,
cx: &LateContext,
fn_kind: FnKind,
}
}
-impl LateLintPass for PluginAsLibrary {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for PluginAsLibrary {
fn check_item(&mut self, cx: &LateContext, it: &hir::Item) {
if cx.sess().plugin_registrar_fn.get().is_some() {
// We're compiling a plugin; it's fine to link other plugins.
}
}
-impl LateLintPass for InvalidNoMangleItems {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for InvalidNoMangleItems {
fn check_item(&mut self, cx: &LateContext, it: &hir::Item) {
match it.node {
hir::ItemFn(.., ref generics, _) => {
}
}
-impl LateLintPass for MutableTransmutes {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MutableTransmutes {
fn check_expr(&mut self, cx: &LateContext, expr: &hir::Expr) {
use syntax::abi::Abi::RustIntrinsic;
}
}
-impl LateLintPass for UnstableFeatures {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnstableFeatures {
fn check_attribute(&mut self, ctx: &LateContext, attr: &ast::Attribute) {
if attr.meta().check_name("feature") {
if let Some(items) = attr.meta().meta_item_list() {
}
}
-impl LateLintPass for UnionsWithDropFields {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnionsWithDropFields {
fn check_item(&mut self, ctx: &LateContext, item: &hir::Item) {
if let hir::ItemUnion(ref vdata, _) = item.node {
let param_env = &ty::ParameterEnvironment::for_item(ctx.tcx, item.id);
}
}
-impl LateLintPass for TypeLimits {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits {
fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) {
match e.node {
hir::ExprUnary(hir::UnNeg, ref expr) => {
}
}
-impl LateLintPass for ImproperCTypes {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for ImproperCTypes {
fn check_item(&mut self, cx: &LateContext, it: &hir::Item) {
let mut vis = ImproperCTypesVisitor { cx: cx };
if let hir::ItemForeignMod(ref nmod) = it.node {
}
}
-impl LateLintPass for VariantSizeDifferences {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences {
fn check_item(&mut self, cx: &LateContext, it: &hir::Item) {
if let hir::ItemEnum(ref enum_definition, ref gens) = it.node {
if gens.ty_params.is_empty() {
}
}
-impl LateLintPass for UnusedMut {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedMut {
fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) {
if let hir::ExprMatch(_, ref arms, _) = e.node {
for a in arms {
}
}
-impl LateLintPass for UnusedResults {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedResults {
fn check_stmt(&mut self, cx: &LateContext, s: &hir::Stmt) {
let expr = match s.node {
hir::StmtSemi(ref expr, _) => &**expr,
}
}
-impl LateLintPass for UnusedUnsafe {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedUnsafe {
fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) {
if let hir::ExprBlock(ref blk) = e.node {
// Don't warn about generated blocks, that'll just pollute the output.
}
}
-impl LateLintPass for PathStatements {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for PathStatements {
fn check_stmt(&mut self, cx: &LateContext, s: &hir::Stmt) {
if let hir::StmtSemi(ref expr, _) = s.node {
if let hir::ExprPath(_) = expr.node {
}
}
-impl LateLintPass for UnusedAttributes {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedAttributes {
fn check_attribute(&mut self, cx: &LateContext, attr: &ast::Attribute) {
debug!("checking attribute: {:?}", attr);
}
}
-impl LateLintPass for UnusedAllocation {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedAllocation {
fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) {
match e.node {
hir::ExprBox(_) => {}
}
}
-impl<'a> Visitor for AstValidator<'a> {
- fn visit_lifetime(&mut self, lt: &Lifetime) {
+impl<'a> Visitor<'a> for AstValidator<'a> {
+ fn visit_lifetime(&mut self, lt: &'a Lifetime) {
if lt.name == "'_" {
self.session.add_lint(lint::builtin::LIFETIME_UNDERSCORE,
lt.id,
visit::walk_lifetime(self, lt)
}
- fn visit_expr(&mut self, expr: &Expr) {
+ fn visit_expr(&mut self, expr: &'a Expr) {
match expr.node {
ExprKind::While(.., Some(ident)) |
ExprKind::Loop(_, Some(ident)) |
visit::walk_expr(self, expr)
}
- fn visit_ty(&mut self, ty: &Ty) {
+ fn visit_ty(&mut self, ty: &'a Ty) {
match ty.node {
TyKind::BareFn(ref bfty) => {
self.check_decl_no_pat(&bfty.decl, |span, _| {
visit::walk_ty(self, ty)
}
- fn visit_path(&mut self, path: &Path, id: NodeId) {
+ fn visit_path(&mut self, path: &'a Path, id: NodeId) {
if path.global && path.segments.len() > 0 {
let ident = path.segments[0].identifier;
if token::Ident(ident).is_path_segment_keyword() {
visit::walk_path(self, path)
}
- fn visit_item(&mut self, item: &Item) {
+ fn visit_item(&mut self, item: &'a Item) {
match item.node {
ItemKind::Use(ref view_path) => {
let path = view_path.node.path();
visit::walk_item(self, item)
}
- fn visit_foreign_item(&mut self, fi: &ForeignItem) {
+ fn visit_foreign_item(&mut self, fi: &'a ForeignItem) {
match fi.node {
ForeignItemKind::Fn(ref decl, _) => {
self.check_decl_no_pat(decl, |span, is_recent| {
visit::walk_foreign_item(self, fi)
}
- fn visit_vis(&mut self, vis: &Visibility) {
+ fn visit_vis(&mut self, vis: &'a Visibility) {
match *vis {
Visibility::Restricted { ref path, .. } => {
if !path.segments.iter().all(|segment| segment.parameters.is_empty()) {
collector.print("HIR STATS");
}
-pub fn print_ast_stats(krate: &ast::Crate, title: &str) {
+pub fn print_ast_stats<'v>(krate: &'v ast::Crate, title: &str) {
let mut collector = StatCollector {
krate: None,
data: FxHashMap(),
}
}
-impl<'v> ast_visit::Visitor for StatCollector<'v> {
+impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> {
- fn visit_mod(&mut self, m: &ast::Mod, _s: Span, _n: NodeId) {
+ fn visit_mod(&mut self, m: &'v ast::Mod, _s: Span, _n: NodeId) {
self.record("Mod", Id::None, m);
ast_visit::walk_mod(self, m)
}
- fn visit_foreign_item(&mut self, i: &ast::ForeignItem) {
+ fn visit_foreign_item(&mut self, i: &'v ast::ForeignItem) {
self.record("ForeignItem", Id::None, i);
ast_visit::walk_foreign_item(self, i)
}
- fn visit_item(&mut self, i: &ast::Item) {
+ fn visit_item(&mut self, i: &'v ast::Item) {
self.record("Item", Id::None, i);
ast_visit::walk_item(self, i)
}
- fn visit_local(&mut self, l: &ast::Local) {
+ fn visit_local(&mut self, l: &'v ast::Local) {
self.record("Local", Id::None, l);
ast_visit::walk_local(self, l)
}
- fn visit_block(&mut self, b: &ast::Block) {
+ fn visit_block(&mut self, b: &'v ast::Block) {
self.record("Block", Id::None, b);
ast_visit::walk_block(self, b)
}
- fn visit_stmt(&mut self, s: &ast::Stmt) {
+ fn visit_stmt(&mut self, s: &'v ast::Stmt) {
self.record("Stmt", Id::None, s);
ast_visit::walk_stmt(self, s)
}
- fn visit_arm(&mut self, a: &ast::Arm) {
+ fn visit_arm(&mut self, a: &'v ast::Arm) {
self.record("Arm", Id::None, a);
ast_visit::walk_arm(self, a)
}
- fn visit_pat(&mut self, p: &ast::Pat) {
+ fn visit_pat(&mut self, p: &'v ast::Pat) {
self.record("Pat", Id::None, p);
ast_visit::walk_pat(self, p)
}
- fn visit_expr(&mut self, ex: &ast::Expr) {
+ fn visit_expr(&mut self, ex: &'v ast::Expr) {
self.record("Expr", Id::None, ex);
ast_visit::walk_expr(self, ex)
}
- fn visit_ty(&mut self, t: &ast::Ty) {
+ fn visit_ty(&mut self, t: &'v ast::Ty) {
self.record("Ty", Id::None, t);
ast_visit::walk_ty(self, t)
}
fn visit_fn(&mut self,
- fk: ast_visit::FnKind,
- fd: &ast::FnDecl,
+ fk: ast_visit::FnKind<'v>,
+ fd: &'v ast::FnDecl,
s: Span,
_: NodeId) {
self.record("FnDecl", Id::None, fd);
ast_visit::walk_fn(self, fk, fd, s)
}
- fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
+ fn visit_trait_item(&mut self, ti: &'v ast::TraitItem) {
self.record("TraitItem", Id::None, ti);
ast_visit::walk_trait_item(self, ti)
}
- fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
+ fn visit_impl_item(&mut self, ii: &'v ast::ImplItem) {
self.record("ImplItem", Id::None, ii);
ast_visit::walk_impl_item(self, ii)
}
- fn visit_ty_param_bound(&mut self, bounds: &ast::TyParamBound) {
+ fn visit_ty_param_bound(&mut self, bounds: &'v ast::TyParamBound) {
self.record("TyParamBound", Id::None, bounds);
ast_visit::walk_ty_param_bound(self, bounds)
}
- fn visit_struct_field(&mut self, s: &ast::StructField) {
+ fn visit_struct_field(&mut self, s: &'v ast::StructField) {
self.record("StructField", Id::None, s);
ast_visit::walk_struct_field(self, s)
}
fn visit_variant(&mut self,
- v: &ast::Variant,
- g: &ast::Generics,
+ v: &'v ast::Variant,
+ g: &'v ast::Generics,
item_id: NodeId) {
self.record("Variant", Id::None, v);
ast_visit::walk_variant(self, v, g, item_id)
}
- fn visit_lifetime(&mut self, lifetime: &ast::Lifetime) {
+ fn visit_lifetime(&mut self, lifetime: &'v ast::Lifetime) {
self.record("Lifetime", Id::None, lifetime);
ast_visit::walk_lifetime(self, lifetime)
}
- fn visit_lifetime_def(&mut self, lifetime: &ast::LifetimeDef) {
+ fn visit_lifetime_def(&mut self, lifetime: &'v ast::LifetimeDef) {
self.record("LifetimeDef", Id::None, lifetime);
ast_visit::walk_lifetime_def(self, lifetime)
}
- fn visit_mac(&mut self, mac: &ast::Mac) {
+ fn visit_mac(&mut self, mac: &'v ast::Mac) {
self.record("Mac", Id::None, mac);
}
fn visit_path_list_item(&mut self,
- prefix: &ast::Path,
- item: &ast::PathListItem) {
+ prefix: &'v ast::Path,
+ item: &'v ast::PathListItem) {
self.record("PathListItem", Id::None, item);
ast_visit::walk_path_list_item(self, prefix, item)
}
fn visit_path_segment(&mut self,
path_span: Span,
- path_segment: &ast::PathSegment) {
+ path_segment: &'v ast::PathSegment) {
self.record("PathSegment", Id::None, path_segment);
ast_visit::walk_path_segment(self, path_span, path_segment)
}
- fn visit_assoc_type_binding(&mut self, type_binding: &ast::TypeBinding) {
+ fn visit_assoc_type_binding(&mut self, type_binding: &'v ast::TypeBinding) {
self.record("TypeBinding", Id::None, type_binding);
ast_visit::walk_assoc_type_binding(self, type_binding)
}
- fn visit_attribute(&mut self, attr: &ast::Attribute) {
+ fn visit_attribute(&mut self, attr: &'v ast::Attribute) {
self.record("Attribute", Id::None, attr);
}
- fn visit_macro_def(&mut self, macro_def: &ast::MacroDef) {
+ fn visit_macro_def(&mut self, macro_def: &'v ast::MacroDef) {
self.record("MacroDef", Id::None, macro_def);
ast_visit::walk_macro_def(self, macro_def)
}
sess: &'a Session,
}
-impl<'a> Visitor for CheckNoAsm<'a> {
- fn visit_expr(&mut self, e: &ast::Expr) {
+impl<'a> Visitor<'a> for CheckNoAsm<'a> {
+ fn visit_expr(&mut self, e: &'a ast::Expr) {
match e.node {
ast::ExprKind::InlineAsm(_) => {
span_err!(self.sess,
macro_rules! method {
($visit:ident: $ty:ty, $invoc:path, $walk:ident) => {
- fn $visit(&mut self, node: &$ty) {
+ fn $visit(&mut self, node: &'a $ty) {
if let $invoc(..) = node.node {
self.visit_invoc(node.id);
} else {
}
}
-impl<'a, 'b> Visitor for BuildReducedGraphVisitor<'a, 'b> {
+impl<'a, 'b> Visitor<'a> for BuildReducedGraphVisitor<'a, 'b> {
method!(visit_impl_item: ast::ImplItem, ast::ImplItemKind::Macro, walk_impl_item);
method!(visit_expr: ast::Expr, ast::ExprKind::Mac, walk_expr);
method!(visit_pat: ast::Pat, ast::PatKind::Mac, walk_pat);
method!(visit_ty: ast::Ty, ast::TyKind::Mac, walk_ty);
- fn visit_item(&mut self, item: &Item) {
+ fn visit_item(&mut self, item: &'a Item) {
let macro_use = match item.node {
ItemKind::Mac(..) if item.id == ast::DUMMY_NODE_ID => return, // Scope placeholder
ItemKind::Mac(..) => {
}
}
- fn visit_stmt(&mut self, stmt: &ast::Stmt) {
+ fn visit_stmt(&mut self, stmt: &'a ast::Stmt) {
if let ast::StmtKind::Mac(..) = stmt.node {
self.legacy_scope = LegacyScope::Expansion(self.visit_invoc(stmt.id));
} else {
}
}
- fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) {
+ fn visit_foreign_item(&mut self, foreign_item: &'a ForeignItem) {
self.resolver.build_reduced_graph_for_foreign_item(foreign_item, self.expansion);
visit::walk_foreign_item(self, foreign_item);
}
- fn visit_block(&mut self, block: &Block) {
+ fn visit_block(&mut self, block: &'a Block) {
let (parent, legacy_scope) = (self.resolver.current_module, self.legacy_scope);
self.resolver.build_reduced_graph_for_block(block);
visit::walk_block(self, block);
self.legacy_scope = legacy_scope;
}
- fn visit_trait_item(&mut self, item: &TraitItem) {
+ fn visit_trait_item(&mut self, item: &'a TraitItem) {
let parent = self.resolver.current_module;
let def_id = parent.def_id().unwrap();
}
}
-impl<'a, 'b> Visitor for UnusedImportCheckVisitor<'a, 'b> {
- fn visit_item(&mut self, item: &ast::Item) {
+impl<'a, 'b> Visitor<'a> for UnusedImportCheckVisitor<'a, 'b> {
+ fn visit_item(&mut self, item: &'a ast::Item) {
visit::walk_item(self, item);
// Ignore is_public import statements because there's no way to be sure
// whether they're used or not. Also ignore imports with a dummy span
}
ViewPathList(_, ref list) => {
+ if list.len() == 0 {
+ self.unused_imports
+ .entry(item.id)
+ .or_insert_with(NodeMap)
+ .insert(item.id, item.span);
+ }
for i in list {
self.check_import(item.id, i.node.id, i.span);
}
}
}
-impl<'a> Visitor for Resolver<'a> {
- fn visit_item(&mut self, item: &Item) {
+impl<'a, 'tcx> Visitor<'tcx> for Resolver<'a> {
+ fn visit_item(&mut self, item: &'tcx Item) {
self.resolve_item(item);
}
- fn visit_arm(&mut self, arm: &Arm) {
+ fn visit_arm(&mut self, arm: &'tcx Arm) {
self.resolve_arm(arm);
}
- fn visit_block(&mut self, block: &Block) {
+ fn visit_block(&mut self, block: &'tcx Block) {
self.resolve_block(block);
}
- fn visit_expr(&mut self, expr: &Expr) {
+ fn visit_expr(&mut self, expr: &'tcx Expr) {
self.resolve_expr(expr, None);
}
- fn visit_local(&mut self, local: &Local) {
+ fn visit_local(&mut self, local: &'tcx Local) {
self.resolve_local(local);
}
- fn visit_ty(&mut self, ty: &Ty) {
+ fn visit_ty(&mut self, ty: &'tcx Ty) {
self.resolve_type(ty);
}
- fn visit_poly_trait_ref(&mut self, tref: &ast::PolyTraitRef, m: &ast::TraitBoundModifier) {
+ fn visit_poly_trait_ref(&mut self,
+ tref: &'tcx ast::PolyTraitRef,
+ m: &'tcx ast::TraitBoundModifier) {
let ast::Path { ref segments, span, global } = tref.trait_ref.path;
let path: Vec<_> = segments.iter().map(|seg| seg.identifier).collect();
let def = self.resolve_trait_reference(&path, global, None, span);
visit::walk_poly_trait_ref(self, tref, m);
}
fn visit_variant(&mut self,
- variant: &ast::Variant,
- generics: &Generics,
+ variant: &'tcx ast::Variant,
+ generics: &'tcx Generics,
item_id: ast::NodeId) {
if let Some(ref dis_expr) = variant.node.disr_expr {
// resolve the discriminator expr as a constant
item_id,
variant.span);
}
- fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) {
+ fn visit_foreign_item(&mut self, foreign_item: &'tcx ForeignItem) {
let type_parameters = match foreign_item.node {
ForeignItemKind::Fn(_, ref generics) => {
HasTypeParameters(generics, ItemRibKind)
});
}
fn visit_fn(&mut self,
- function_kind: FnKind,
- declaration: &FnDecl,
+ function_kind: FnKind<'tcx>,
+ declaration: &'tcx FnDecl,
_: Span,
node_id: NodeId) {
let rib_kind = match function_kind {
}
}
- fn process_formals(&mut self, formals: &Vec<ast::Arg>, qualname: &str) {
+ fn process_formals(&mut self, formals: &'l [ast::Arg], qualname: &str) {
for arg in formals {
self.visit_pat(&arg.pat);
let mut collector = PathCollector::new();
}
fn process_method(&mut self,
- sig: &ast::MethodSig,
- body: Option<&ast::Block>,
+ sig: &'l ast::MethodSig,
+ body: Option<&'l ast::Block>,
id: ast::NodeId,
name: ast::Name,
vis: Visibility,
- attrs: &[Attribute],
+ attrs: &'l [Attribute],
span: Span) {
debug!("process_method: {}:{}", id, name);
}
}
- fn process_trait_ref(&mut self, trait_ref: &ast::TraitRef) {
+ fn process_trait_ref(&mut self, trait_ref: &'l ast::TraitRef) {
let trait_ref_data = self.save_ctxt.get_trait_ref_data(trait_ref, self.cur_scope);
if let Some(trait_ref_data) = trait_ref_data {
if !self.span.filter_generated(Some(trait_ref_data.span), trait_ref.path.span) {
// Dump generic params bindings, then visit_generics
fn process_generic_params(&mut self,
- generics: &ast::Generics,
+ generics: &'l ast::Generics,
full_span: Span,
prefix: &str,
id: NodeId) {
}
fn process_fn(&mut self,
- item: &ast::Item,
- decl: &ast::FnDecl,
- ty_params: &ast::Generics,
- body: &ast::Block) {
+ item: &'l ast::Item,
+ decl: &'l ast::FnDecl,
+ ty_params: &'l ast::Generics,
+ body: &'l ast::Block) {
if let Some(fn_data) = self.save_ctxt.get_item_data(item) {
down_cast_data!(fn_data, FunctionData, item.span);
if !self.span.filter_generated(Some(fn_data.span), item.span) {
self.nest(item.id, |v| v.visit_block(&body));
}
- fn process_static_or_const_item(&mut self, item: &ast::Item, typ: &ast::Ty, expr: &ast::Expr) {
+ fn process_static_or_const_item(&mut self,
+ item: &'l ast::Item,
+ typ: &'l ast::Ty,
+ expr: &'l ast::Expr) {
if let Some(var_data) = self.save_ctxt.get_item_data(item) {
down_cast_data!(var_data, VariableData, item.span);
if !self.span.filter_generated(Some(var_data.span), item.span) {
id: ast::NodeId,
name: ast::Name,
span: Span,
- typ: &ast::Ty,
- expr: &ast::Expr,
+ typ: &'l ast::Ty,
+ expr: &'l ast::Expr,
parent_id: DefId,
vis: Visibility,
- attrs: &[Attribute]) {
+ attrs: &'l [Attribute]) {
let qualname = format!("::{}", self.tcx.node_path_str(id));
let sub_span = self.span.sub_span_after_keyword(span, keywords::Const);
// FIXME tuple structs should generate tuple-specific data.
fn process_struct(&mut self,
- item: &ast::Item,
- def: &ast::VariantData,
- ty_params: &ast::Generics) {
+ item: &'l ast::Item,
+ def: &'l ast::VariantData,
+ ty_params: &'l ast::Generics) {
let name = item.ident.to_string();
let qualname = format!("::{}", self.tcx.node_path_str(item.id));
}
fn process_enum(&mut self,
- item: &ast::Item,
- enum_definition: &ast::EnumDef,
- ty_params: &ast::Generics) {
+ item: &'l ast::Item,
+ enum_definition: &'l ast::EnumDef,
+ ty_params: &'l ast::Generics) {
let enum_data = self.save_ctxt.get_item_data(item);
let enum_data = match enum_data {
None => return,
}
fn process_impl(&mut self,
- item: &ast::Item,
- type_parameters: &ast::Generics,
- trait_ref: &Option<ast::TraitRef>,
- typ: &ast::Ty,
- impl_items: &[ast::ImplItem]) {
+ item: &'l ast::Item,
+ type_parameters: &'l ast::Generics,
+ trait_ref: &'l Option<ast::TraitRef>,
+ typ: &'l ast::Ty,
+ impl_items: &'l [ast::ImplItem]) {
let mut has_self_ref = false;
if let Some(impl_data) = self.save_ctxt.get_item_data(item) {
down_cast_data!(impl_data, ImplData, item.span);
}
fn process_trait(&mut self,
- item: &ast::Item,
- generics: &ast::Generics,
- trait_refs: &ast::TyParamBounds,
- methods: &[ast::TraitItem]) {
+ item: &'l ast::Item,
+ generics: &'l ast::Generics,
+ trait_refs: &'l ast::TyParamBounds,
+ methods: &'l [ast::TraitItem]) {
let name = item.ident.to_string();
let qualname = format!("::{}", self.tcx.node_path_str(item.id));
let mut val = name.clone();
}
fn process_struct_lit(&mut self,
- ex: &ast::Expr,
- path: &ast::Path,
- fields: &Vec<ast::Field>,
- variant: &ty::VariantDef,
- base: &Option<P<ast::Expr>>) {
+ ex: &'l ast::Expr,
+ path: &'l ast::Path,
+ fields: &'l [ast::Field],
+ variant: &'l ty::VariantDef,
+ base: &'l Option<P<ast::Expr>>) {
self.write_sub_paths_truncated(path, false);
if let Some(struct_lit_data) = self.save_ctxt.get_expr_data(ex) {
walk_list!(self, visit_expr, base);
}
- fn process_method_call(&mut self, ex: &ast::Expr, args: &Vec<P<ast::Expr>>) {
+ fn process_method_call(&mut self, ex: &'l ast::Expr, args: &'l [P<ast::Expr>]) {
if let Some(mcd) = self.save_ctxt.get_expr_data(ex) {
down_cast_data!(mcd, MethodCallData, ex.span);
if !self.span.filter_generated(Some(mcd.span), ex.span) {
walk_list!(self, visit_expr, args);
}
- fn process_pat(&mut self, p: &ast::Pat) {
+ fn process_pat(&mut self, p: &'l ast::Pat) {
match p.node {
PatKind::Struct(ref path, ref fields, _) => {
visit::walk_path(self, path);
}
- fn process_var_decl(&mut self, p: &ast::Pat, value: String) {
+ fn process_var_decl(&mut self, p: &'l ast::Pat, value: String) {
// The local could declare multiple new vars, we must walk the
// pattern and collect them all.
let mut collector = PathCollector::new();
}
}
- fn process_trait_item(&mut self, trait_item: &ast::TraitItem, trait_id: DefId) {
+ fn process_trait_item(&mut self, trait_item: &'l ast::TraitItem, trait_id: DefId) {
self.process_macro_use(trait_item.span, trait_item.id);
match trait_item.node {
ast::TraitItemKind::Const(ref ty, Some(ref expr)) => {
}
}
- fn process_impl_item(&mut self, impl_item: &ast::ImplItem, impl_id: DefId) {
+ fn process_impl_item(&mut self, impl_item: &'l ast::ImplItem, impl_id: DefId) {
self.process_macro_use(impl_item.span, impl_item.id);
match impl_item.node {
ast::ImplItemKind::Const(ref ty, ref expr) => {
}
}
-impl<'l, 'tcx: 'l, 'll, D: Dump +'ll> Visitor for DumpVisitor<'l, 'tcx, 'll, D> {
- fn visit_item(&mut self, item: &ast::Item) {
+impl<'l, 'tcx: 'l, 'll, D: Dump +'ll> Visitor<'l> for DumpVisitor<'l, 'tcx, 'll, D> {
+ fn visit_item(&mut self, item: &'l ast::Item) {
use syntax::ast::ItemKind::*;
self.process_macro_use(item.span, item.id);
match item.node {
}
}
- fn visit_generics(&mut self, generics: &ast::Generics) {
+ fn visit_generics(&mut self, generics: &'l ast::Generics) {
for param in generics.ty_params.iter() {
for bound in param.bounds.iter() {
if let ast::TraitTyParamBound(ref trait_ref, _) = *bound {
}
}
- fn visit_ty(&mut self, t: &ast::Ty) {
+ fn visit_ty(&mut self, t: &'l ast::Ty) {
self.process_macro_use(t.span, t.id);
match t.node {
ast::TyKind::Path(_, ref path) => {
+ if self.span.filter_generated(None, t.span) {
+ return;
+ }
+
if let Some(id) = self.lookup_def_id(t.id) {
let sub_span = self.span.sub_span_for_type_name(t.span);
- if !self.span.filter_generated(sub_span, t.span) {
- self.dumper.type_ref(TypeRefData {
- span: sub_span.expect("No span found for type ref"),
- ref_id: Some(id),
- scope: self.cur_scope,
- qualname: String::new()
- }.lower(self.tcx));
- }
+ self.dumper.type_ref(TypeRefData {
+ span: sub_span.expect("No span found for type ref"),
+ ref_id: Some(id),
+ scope: self.cur_scope,
+ qualname: String::new()
+ }.lower(self.tcx));
}
self.write_sub_paths_truncated(path, false);
}
}
- fn visit_expr(&mut self, ex: &ast::Expr) {
+ fn visit_expr(&mut self, ex: &'l ast::Expr) {
self.process_macro_use(ex.span, ex.id);
match ex.node {
ast::ExprKind::Call(ref _f, ref _args) => {
}
}
- fn visit_mac(&mut self, mac: &ast::Mac) {
+ fn visit_mac(&mut self, mac: &'l ast::Mac) {
// These shouldn't exist in the AST at this point, log a span bug.
span_bug!(mac.span, "macro invocation should have been expanded out of AST");
}
- fn visit_pat(&mut self, p: &ast::Pat) {
+ fn visit_pat(&mut self, p: &'l ast::Pat) {
self.process_macro_use(p.span, p.id);
self.process_pat(p);
}
- fn visit_arm(&mut self, arm: &ast::Arm) {
+ fn visit_arm(&mut self, arm: &'l ast::Arm) {
let mut collector = PathCollector::new();
for pattern in &arm.pats {
// collect paths from the arm's patterns
self.visit_expr(&arm.body);
}
- fn visit_stmt(&mut self, s: &ast::Stmt) {
+ fn visit_stmt(&mut self, s: &'l ast::Stmt) {
self.process_macro_use(s.span, s.id);
visit::walk_stmt(self, s)
}
- fn visit_local(&mut self, l: &ast::Local) {
+ fn visit_local(&mut self, l: &'l ast::Local) {
self.process_macro_use(l.span, l.id);
let value = l.init.as_ref().map(|i| self.span.snippet(i.span)).unwrap_or(String::new());
self.process_var_decl(&l.pat, value);
match typ.node {
// Common case impl for a struct or something basic.
ast::TyKind::Path(None, ref path) => {
+ filter!(self.span_utils, None, path.span, None);
sub_span = self.span_utils.sub_span_for_type_name(path.span);
- filter!(self.span_utils, sub_span, path.span, None);
type_data = self.lookup_ref_id(typ.id).map(|id| {
TypeRefData {
span: sub_span.unwrap(),
}
}
-impl Visitor for PathCollector {
+impl<'a> Visitor<'a> for PathCollector {
fn visit_pat(&mut self, p: &ast::Pat) {
match p.node {
PatKind::Struct(ref path, ..) => {
use std::collections::BTreeMap;
use std::default::Default;
use std::error;
-use std::fmt::{self, Display, Formatter};
+use std::fmt::{self, Display, Formatter, Write as FmtWrite};
use std::fs::{self, File, OpenOptions};
use std::io::prelude::*;
use std::io::{self, BufWriter, BufReader};
// Update the search index
let dst = cx.dst.join("search-index.js");
- let all_indexes = try_err!(collect(&dst, &krate.name, "searchIndex"), &dst);
+ let mut all_indexes = try_err!(collect(&dst, &krate.name, "searchIndex"), &dst);
+ all_indexes.push(search_index);
+ // Sort the indexes by crate so the file will be generated identically even
+ // with rustdoc running in parallel.
+ all_indexes.sort();
let mut w = try_err!(File::create(&dst), &dst);
try_err!(writeln!(&mut w, "var searchIndex = {{}};"), &dst);
- try_err!(writeln!(&mut w, "{}", search_index), &dst);
for index in &all_indexes {
try_err!(writeln!(&mut w, "{}", *index), &dst);
}
// Update the list of all implementors for traits
let dst = cx.dst.join("implementors");
- try_err!(mkdir(&dst), &dst);
for (&did, imps) in &cache.implementors {
// Private modules can leak through to this phase of rustdoc, which
// could contain implementations for otherwise private types. In some
}
};
+ let mut implementors = format!(r#"implementors["{}"] = ["#, krate.name);
+ for imp in imps {
+ // If the trait and implementation are in the same crate, then
+ // there's no need to emit information about it (there's inlining
+ // going on). If they're in different crates then the crate defining
+ // the trait will be interested in our implementation.
+ if imp.def_id.krate == did.krate { continue }
+ write!(implementors, r#""{}","#, imp.impl_).unwrap();
+ }
+ implementors.push_str("];");
+
let mut mydst = dst.clone();
for part in &remote_path[..remote_path.len() - 1] {
mydst.push(part);
- try_err!(mkdir(&mydst), &mydst);
}
+ try_err!(fs::create_dir_all(&mydst), &mydst);
mydst.push(&format!("{}.{}.js",
remote_item_type.css_class(),
remote_path[remote_path.len() - 1]));
- let all_implementors = try_err!(collect(&mydst, &krate.name,
- "implementors"),
- &mydst);
- try_err!(mkdir(mydst.parent().unwrap()),
- &mydst.parent().unwrap().to_path_buf());
- let mut f = BufWriter::new(try_err!(File::create(&mydst), &mydst));
- try_err!(writeln!(&mut f, "(function() {{var implementors = {{}};"), &mydst);
+ let mut all_implementors = try_err!(collect(&mydst, &krate.name, "implementors"), &mydst);
+ all_implementors.push(implementors);
+ // Sort the implementors by crate so the file will be generated
+ // identically even with rustdoc running in parallel.
+ all_implementors.sort();
+ let mut f = try_err!(File::create(&mydst), &mydst);
+ try_err!(writeln!(&mut f, "(function() {{var implementors = {{}};"), &mydst);
for implementor in &all_implementors {
- try_err!(write!(&mut f, "{}", *implementor), &mydst);
- }
-
- try_err!(write!(&mut f, r#"implementors["{}"] = ["#, krate.name), &mydst);
- for imp in imps {
- // If the trait and implementation are in the same crate, then
- // there's no need to emit information about it (there's inlining
- // going on). If they're in different crates then the crate defining
- // the trait will be interested in our implementation.
- if imp.def_id.krate == did.krate { continue }
- try_err!(write!(&mut f, r#""{}","#, imp.impl_), &mydst);
+ try_err!(writeln!(&mut f, "{}", *implementor), &mydst);
}
- try_err!(writeln!(&mut f, r"];"), &mydst);
try_err!(writeln!(&mut f, "{}", r"
if (window.register_implementors) {
window.register_implementors(implementors);
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
impl Default for DefaultHasher {
+ /// Creates a new `DefaultHasher` using [`DefaultHasher::new`]. See
+ /// [`DefaultHasher::new`] documentation for more information.
+ ///
+ /// [`DefaultHasher::new`]: #method.new
fn default() -> DefaultHasher {
DefaultHasher::new()
}
#[stable(feature = "env", since = "1.0.0")]
impl ExactSizeIterator for Args {
fn len(&self) -> usize { self.inner.len() }
+ fn is_empty(&self) -> bool { self.inner.is_empty() }
}
#[stable(feature = "env_iterators", since = "1.11.0")]
#[stable(feature = "env", since = "1.0.0")]
impl ExactSizeIterator for ArgsOs {
fn len(&self) -> usize { self.inner.len() }
+ fn is_empty(&self) -> bool { self.inner.is_empty() }
}
#[stable(feature = "env_iterators", since = "1.11.0")]
#![feature(core_float)]
#![feature(core_intrinsics)]
#![feature(dropck_parametricity)]
+#![feature(exact_size_is_empty)]
#![feature(float_extras)]
#![feature(float_from_str_radix)]
#![feature(fn_traits)]
}
/// A struct providing information about a panic.
+///
+/// `PanicInfo` structure is passed to a panic hook set by the [`set_hook()`]
+/// function.
+///
+/// [`set_hook()`]: ../../std/panic/fn.set_hook.html
+///
+/// # Examples
+///
+/// ```should_panic
+/// use std::panic;
+///
+/// panic::set_hook(Box::new(|panic_info| {
+/// println!("panic occurred: {:?}", panic_info.payload().downcast_ref::<&str>().unwrap());
+/// }));
+///
+/// panic!("Normal panic");
+/// ```
#[stable(feature = "panic_hooks", since = "1.10.0")]
pub struct PanicInfo<'a> {
payload: &'a (Any + Send),
impl<'a> PanicInfo<'a> {
/// Returns the payload associated with the panic.
///
- /// This will commonly, but not always, be a `&'static str` or `String`.
+ /// This will commonly, but not always, be a `&'static str` or [`String`].
+ ///
+ /// [`String`]: ../../std/string/struct.String.html
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::panic;
+ ///
+ /// panic::set_hook(Box::new(|panic_info| {
+ /// println!("panic occurred: {:?}", panic_info.payload().downcast_ref::<&str>().unwrap());
+ /// }));
+ ///
+ /// panic!("Normal panic");
+ /// ```
#[stable(feature = "panic_hooks", since = "1.10.0")]
pub fn payload(&self) -> &(Any + Send) {
self.payload
/// Returns information about the location from which the panic originated,
/// if available.
///
- /// This method will currently always return `Some`, but this may change
+ /// This method will currently always return [`Some`], but this may change
/// in future versions.
+ ///
+ /// [`Some`]: ../../std/option/enum.Option.html#variant.Some
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::panic;
+ ///
+ /// panic::set_hook(Box::new(|panic_info| {
+ /// if let Some(location) = panic_info.location() {
+ /// println!("panic occurred in file '{}' at line {}", location.file(), location.line());
+ /// } else {
+ /// println!("panic occurred but can't get location information...");
+ /// }
+ /// }));
+ ///
+ /// panic!("Normal panic");
+ /// ```
#[stable(feature = "panic_hooks", since = "1.10.0")]
pub fn location(&self) -> Option<&Location> {
Some(&self.location)
}
/// A struct containing information about the location of a panic.
+///
+/// This structure is created by the [`location()`] method of [`PanicInfo`].
+///
+/// [`location()`]: ../../std/panic/struct.PanicInfo.html#method.location
+/// [`PanicInfo`]: ../../std/panic/struct.PanicInfo.html
+///
+/// # Examples
+///
+/// ```should_panic
+/// use std::panic;
+///
+/// panic::set_hook(Box::new(|panic_info| {
+/// if let Some(location) = panic_info.location() {
+/// println!("panic occurred in file '{}' at line {}", location.file(), location.line());
+/// } else {
+/// println!("panic occurred but can't get location information...");
+/// }
+/// }));
+///
+/// panic!("Normal panic");
+/// ```
#[stable(feature = "panic_hooks", since = "1.10.0")]
pub struct Location<'a> {
file: &'a str,
impl<'a> Location<'a> {
/// Returns the name of the source file from which the panic originated.
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::panic;
+ ///
+ /// panic::set_hook(Box::new(|panic_info| {
+ /// if let Some(location) = panic_info.location() {
+ /// println!("panic occurred in file '{}'", location.file());
+ /// } else {
+ /// println!("panic occurred but can't get location information...");
+ /// }
+ /// }));
+ ///
+ /// panic!("Normal panic");
+ /// ```
#[stable(feature = "panic_hooks", since = "1.10.0")]
pub fn file(&self) -> &str {
self.file
}
/// Returns the line number from which the panic originated.
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::panic;
+ ///
+ /// panic::set_hook(Box::new(|panic_info| {
+ /// if let Some(location) = panic_info.location() {
+ /// println!("panic occurred at line {}", location.line());
+ /// } else {
+ /// println!("panic occurred but can't get location information...");
+ /// }
+ /// }));
+ ///
+ /// panic!("Normal panic");
+ /// ```
#[stable(feature = "panic_hooks", since = "1.10.0")]
pub fn line(&self) -> u32 {
self.line
/// will be run. If a clean shutdown is needed it is recommended to only call
/// this function at a known point where there are no more destructors left
/// to run.
+///
+/// # Examples
+///
+/// ```
+/// use std::process;
+///
+/// process::exit(0);
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn exit(code: i32) -> ! {
::sys_common::cleanup();
extern {
fn sel_registerName(name: *const libc::c_uchar) -> Sel;
- fn objc_msgSend(obj: NsId, sel: Sel, ...) -> NsId;
fn objc_getClass(class_name: *const libc::c_uchar) -> NsId;
}
+ #[cfg(target_arch="aarch64")]
+ extern {
+ fn objc_msgSend(obj: NsId, sel: Sel) -> NsId;
+ #[link_name="objc_msgSend"]
+ fn objc_msgSend_ul(obj: NsId, sel: Sel, i: libc::c_ulong) -> NsId;
+ }
+
+ #[cfg(not(target_arch="aarch64"))]
+ extern {
+ fn objc_msgSend(obj: NsId, sel: Sel, ...) -> NsId;
+ #[link_name="objc_msgSend"]
+ fn objc_msgSend_ul(obj: NsId, sel: Sel, ...) -> NsId;
+ }
+
#[link(name = "Foundation", kind = "framework")]
#[link(name = "objc")]
#[cfg(not(cargobuild))]
let cnt: usize = mem::transmute(objc_msgSend(args, count_sel));
for i in 0..cnt {
- let tmp = objc_msgSend(args, object_at_sel, i);
+ let tmp = objc_msgSend_ul(args, object_at_sel, i as libc::c_ulong);
let utf_c_str: *const libc::c_char =
mem::transmute(objc_msgSend(tmp, utf8_sel));
let bytes = CStr::from_ptr(utf_c_str).to_bytes();
/// descriptor.
#[stable(feature = "from_raw_os", since = "1.1.0")]
pub trait FromRawFd {
- /// Constructs a new instances of `Self` from the given raw file
+ /// Constructs a new instance of `Self` from the given raw file
/// descriptor.
///
/// This function **consumes ownership** of the specified file
static errno: c_int;
}
- errno as i32
+ unsafe { errno as i32 }
}
/// Gets a detailed string description for the given error number.
fn description(&self) -> &str { "failed to join paths" }
}
-#[cfg(target_os = "freebsd")]
+#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
pub fn current_exe() -> io::Result<PathBuf> {
unsafe {
let mut mib = [libc::CTL_KERN as c_int,
}
}
-#[cfg(target_os = "dragonfly")]
-pub fn current_exe() -> io::Result<PathBuf> {
- ::fs::read_link("/proc/curproc/file")
-}
-
#[cfg(target_os = "netbsd")]
pub fn current_exe() -> io::Result<PathBuf> {
::fs::read_link("/proc/curproc/exe")
}
}
- pub fn visit_with<V: Visitor>(&self, visitor: &mut V) {
+ pub fn visit_with<'a, V: Visitor<'a>>(&'a self, visitor: &mut V) {
match *self {
Expansion::OptExpr(Some(ref expr)) => visitor.visit_expr(expr),
Expansion::OptExpr(None) => {}
s.as_bytes().first().cloned().map_or(false, |b| b >= b'0' && b <= b'9')
}
-impl<'a> Visitor for PostExpansionVisitor<'a> {
+impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
fn visit_attribute(&mut self, attr: &ast::Attribute) {
if !self.context.cm.span_allows_unstable(attr.span) {
// check for gated attributes
}
}
- fn visit_item(&mut self, i: &ast::Item) {
+ fn visit_item(&mut self, i: &'a ast::Item) {
match i.node {
ast::ItemKind::ExternCrate(_) => {
if attr::contains_name(&i.attrs[..], "macro_reexport") {
visit::walk_item(self, i);
}
- fn visit_foreign_item(&mut self, i: &ast::ForeignItem) {
+ fn visit_foreign_item(&mut self, i: &'a ast::ForeignItem) {
let links_to_llvm = match attr::first_attr_value_str_by_name(&i.attrs, "link_name") {
Some(val) => val.as_str().starts_with("llvm."),
_ => false
visit::walk_foreign_item(self, i)
}
- fn visit_ty(&mut self, ty: &ast::Ty) {
+ fn visit_ty(&mut self, ty: &'a ast::Ty) {
match ty.node {
ast::TyKind::BareFn(ref bare_fn_ty) => {
self.check_abi(bare_fn_ty.abi, ty.span);
visit::walk_ty(self, ty)
}
- fn visit_fn_ret_ty(&mut self, ret_ty: &ast::FunctionRetTy) {
+ fn visit_fn_ret_ty(&mut self, ret_ty: &'a ast::FunctionRetTy) {
if let ast::FunctionRetTy::Ty(ref output_ty) = *ret_ty {
match output_ty.node {
ast::TyKind::Never => return,
}
}
- fn visit_expr(&mut self, e: &ast::Expr) {
+ fn visit_expr(&mut self, e: &'a ast::Expr) {
match e.node {
ast::ExprKind::Box(_) => {
gate_feature_post!(&self, box_syntax, e.span, EXPLAIN_BOX_SYNTAX);
visit::walk_expr(self, e);
}
- fn visit_pat(&mut self, pattern: &ast::Pat) {
+ fn visit_pat(&mut self, pattern: &'a ast::Pat) {
match pattern.node {
PatKind::Slice(_, Some(_), ref last) if !last.is_empty() => {
gate_feature_post!(&self, advanced_slice_patterns,
}
fn visit_fn(&mut self,
- fn_kind: FnKind,
- fn_decl: &ast::FnDecl,
+ fn_kind: FnKind<'a>,
+ fn_decl: &'a ast::FnDecl,
span: Span,
_node_id: NodeId) {
// check for const fn declarations
visit::walk_fn(self, fn_kind, fn_decl, span);
}
- fn visit_trait_item(&mut self, ti: &ast::TraitItem) {
+ fn visit_trait_item(&mut self, ti: &'a ast::TraitItem) {
match ti.node {
ast::TraitItemKind::Const(..) => {
gate_feature_post!(&self, associated_consts,
visit::walk_trait_item(self, ti);
}
- fn visit_impl_item(&mut self, ii: &ast::ImplItem) {
+ fn visit_impl_item(&mut self, ii: &'a ast::ImplItem) {
if ii.defaultness == ast::Defaultness::Default {
gate_feature_post!(&self, specialization,
ii.span,
visit::walk_impl_item(self, ii);
}
- fn visit_vis(&mut self, vis: &ast::Visibility) {
+ fn visit_vis(&mut self, vis: &'a ast::Visibility) {
let span = match *vis {
ast::Visibility::Crate(span) => span,
ast::Visibility::Restricted { ref path, .. } => path.span,
visit::walk_vis(self, vis)
}
- fn visit_generics(&mut self, g: &ast::Generics) {
+ fn visit_generics(&mut self, g: &'a ast::Generics) {
for t in &g.ty_params {
if !t.attrs.is_empty() {
gate_feature_post!(&self, generic_param_attrs, t.attrs[0].span,
visit::walk_generics(self, g)
}
- fn visit_lifetime_def(&mut self, lifetime_def: &ast::LifetimeDef) {
+ fn visit_lifetime_def(&mut self, lifetime_def: &'a ast::LifetimeDef) {
if !lifetime_def.attrs.is_empty() {
gate_feature_post!(&self, generic_param_attrs, lifetime_def.attrs[0].span,
"attributes on lifetime bindings are experimental");
struct PatIdentVisitor {
spans: Vec<Span>
}
- impl ::visit::Visitor for PatIdentVisitor {
- fn visit_pat(&mut self, p: &ast::Pat) {
+ impl<'a> ::visit::Visitor<'a> for PatIdentVisitor {
+ fn visit_pat(&mut self, p: &'a ast::Pat) {
match p.node {
PatKind::Ident(_ , ref spannedident, _) => {
self.spans.push(spannedident.span.clone());
mode: Mode,
}
-impl<'a> Visitor for ShowSpanVisitor<'a> {
- fn visit_expr(&mut self, e: &ast::Expr) {
+impl<'a> Visitor<'a> for ShowSpanVisitor<'a> {
+ fn visit_expr(&mut self, e: &'a ast::Expr) {
if let Mode::Expression = self.mode {
self.span_diagnostic.span_warn(e.span, "expression");
}
visit::walk_expr(self, e);
}
- fn visit_pat(&mut self, p: &ast::Pat) {
+ fn visit_pat(&mut self, p: &'a ast::Pat) {
if let Mode::Pattern = self.mode {
self.span_diagnostic.span_warn(p.span, "pattern");
}
visit::walk_pat(self, p);
}
- fn visit_ty(&mut self, t: &ast::Ty) {
+ fn visit_ty(&mut self, t: &'a ast::Ty) {
if let Mode::Type = self.mode {
self.span_diagnostic.span_warn(t.span, "type");
}
visit::walk_ty(self, t);
}
- fn visit_mac(&mut self, mac: &ast::Mac) {
+ fn visit_mac(&mut self, mac: &'a ast::Mac) {
visit::walk_mac(self, mac);
}
}
}
}
-impl Visitor for NodeCounter {
+impl<'ast> Visitor<'ast> for NodeCounter {
fn visit_ident(&mut self, span: Span, ident: Ident) {
self.count += 1;
walk_ident(self, span, ident);
/// explicitly, you need to override each method. (And you also need
/// to monitor future changes to `Visitor` in case a new method with a
/// new default implementation gets introduced.)
-pub trait Visitor: Sized {
+pub trait Visitor<'ast>: Sized {
fn visit_name(&mut self, _span: Span, _name: Name) {
// Nothing to do.
}
fn visit_ident(&mut self, span: Span, ident: Ident) {
walk_ident(self, span, ident);
}
- fn visit_mod(&mut self, m: &Mod, _s: Span, _n: NodeId) { walk_mod(self, m) }
- fn visit_foreign_item(&mut self, i: &ForeignItem) { walk_foreign_item(self, i) }
- fn visit_item(&mut self, i: &Item) { walk_item(self, i) }
- fn visit_local(&mut self, l: &Local) { walk_local(self, l) }
- fn visit_block(&mut self, b: &Block) { walk_block(self, b) }
- fn visit_stmt(&mut self, s: &Stmt) { walk_stmt(self, s) }
- fn visit_arm(&mut self, a: &Arm) { walk_arm(self, a) }
- fn visit_pat(&mut self, p: &Pat) { walk_pat(self, p) }
- fn visit_expr(&mut self, ex: &Expr) { walk_expr(self, ex) }
- fn visit_expr_post(&mut self, _ex: &Expr) { }
- fn visit_ty(&mut self, t: &Ty) { walk_ty(self, t) }
- fn visit_generics(&mut self, g: &Generics) { walk_generics(self, g) }
- fn visit_fn(&mut self, fk: FnKind, fd: &FnDecl, s: Span, _: NodeId) {
+ fn visit_mod(&mut self, m: &'ast Mod, _s: Span, _n: NodeId) { walk_mod(self, m) }
+ fn visit_foreign_item(&mut self, i: &'ast ForeignItem) { walk_foreign_item(self, i) }
+ fn visit_item(&mut self, i: &'ast Item) { walk_item(self, i) }
+ fn visit_local(&mut self, l: &'ast Local) { walk_local(self, l) }
+ fn visit_block(&mut self, b: &'ast Block) { walk_block(self, b) }
+ fn visit_stmt(&mut self, s: &'ast Stmt) { walk_stmt(self, s) }
+ fn visit_arm(&mut self, a: &'ast Arm) { walk_arm(self, a) }
+ fn visit_pat(&mut self, p: &'ast Pat) { walk_pat(self, p) }
+ fn visit_expr(&mut self, ex: &'ast Expr) { walk_expr(self, ex) }
+ fn visit_expr_post(&mut self, _ex: &'ast Expr) { }
+ fn visit_ty(&mut self, t: &'ast Ty) { walk_ty(self, t) }
+ fn visit_generics(&mut self, g: &'ast Generics) { walk_generics(self, g) }
+ fn visit_fn(&mut self, fk: FnKind<'ast>, fd: &'ast FnDecl, s: Span, _: NodeId) {
walk_fn(self, fk, fd, s)
}
- fn visit_trait_item(&mut self, ti: &TraitItem) { walk_trait_item(self, ti) }
- fn visit_impl_item(&mut self, ii: &ImplItem) { walk_impl_item(self, ii) }
- fn visit_trait_ref(&mut self, t: &TraitRef) { walk_trait_ref(self, t) }
- fn visit_ty_param_bound(&mut self, bounds: &TyParamBound) {
+ fn visit_trait_item(&mut self, ti: &'ast TraitItem) { walk_trait_item(self, ti) }
+ fn visit_impl_item(&mut self, ii: &'ast ImplItem) { walk_impl_item(self, ii) }
+ fn visit_trait_ref(&mut self, t: &'ast TraitRef) { walk_trait_ref(self, t) }
+ fn visit_ty_param_bound(&mut self, bounds: &'ast TyParamBound) {
walk_ty_param_bound(self, bounds)
}
- fn visit_poly_trait_ref(&mut self, t: &PolyTraitRef, m: &TraitBoundModifier) {
+ fn visit_poly_trait_ref(&mut self, t: &'ast PolyTraitRef, m: &'ast TraitBoundModifier) {
walk_poly_trait_ref(self, t, m)
}
- fn visit_variant_data(&mut self, s: &VariantData, _: Ident,
- _: &Generics, _: NodeId, _: Span) {
+ fn visit_variant_data(&mut self, s: &'ast VariantData, _: Ident,
+ _: &'ast Generics, _: NodeId, _: Span) {
walk_struct_def(self, s)
}
- fn visit_struct_field(&mut self, s: &StructField) { walk_struct_field(self, s) }
- fn visit_enum_def(&mut self, enum_definition: &EnumDef,
- generics: &Generics, item_id: NodeId, _: Span) {
+ fn visit_struct_field(&mut self, s: &'ast StructField) { walk_struct_field(self, s) }
+ fn visit_enum_def(&mut self, enum_definition: &'ast EnumDef,
+ generics: &'ast Generics, item_id: NodeId, _: Span) {
walk_enum_def(self, enum_definition, generics, item_id)
}
- fn visit_variant(&mut self, v: &Variant, g: &Generics, item_id: NodeId) {
+ fn visit_variant(&mut self, v: &'ast Variant, g: &'ast Generics, item_id: NodeId) {
walk_variant(self, v, g, item_id)
}
- fn visit_lifetime(&mut self, lifetime: &Lifetime) {
+ fn visit_lifetime(&mut self, lifetime: &'ast Lifetime) {
walk_lifetime(self, lifetime)
}
- fn visit_lifetime_def(&mut self, lifetime: &LifetimeDef) {
+ fn visit_lifetime_def(&mut self, lifetime: &'ast LifetimeDef) {
walk_lifetime_def(self, lifetime)
}
- fn visit_mac(&mut self, _mac: &Mac) {
+ fn visit_mac(&mut self, _mac: &'ast Mac) {
panic!("visit_mac disabled by default");
// NB: see note about macros above.
// if you really want a visitor that
// definition in your trait impl:
// visit::walk_mac(self, _mac)
}
- fn visit_path(&mut self, path: &Path, _id: NodeId) {
+ fn visit_path(&mut self, path: &'ast Path, _id: NodeId) {
walk_path(self, path)
}
- fn visit_path_list_item(&mut self, prefix: &Path, item: &PathListItem) {
+ fn visit_path_list_item(&mut self, prefix: &'ast Path, item: &'ast PathListItem) {
walk_path_list_item(self, prefix, item)
}
- fn visit_path_segment(&mut self, path_span: Span, path_segment: &PathSegment) {
+ fn visit_path_segment(&mut self, path_span: Span, path_segment: &'ast PathSegment) {
walk_path_segment(self, path_span, path_segment)
}
- fn visit_path_parameters(&mut self, path_span: Span, path_parameters: &PathParameters) {
+ fn visit_path_parameters(&mut self, path_span: Span, path_parameters: &'ast PathParameters) {
walk_path_parameters(self, path_span, path_parameters)
}
- fn visit_assoc_type_binding(&mut self, type_binding: &TypeBinding) {
+ fn visit_assoc_type_binding(&mut self, type_binding: &'ast TypeBinding) {
walk_assoc_type_binding(self, type_binding)
}
- fn visit_attribute(&mut self, _attr: &Attribute) {}
- fn visit_macro_def(&mut self, macro_def: &MacroDef) {
+ fn visit_attribute(&mut self, _attr: &'ast Attribute) {}
+ fn visit_macro_def(&mut self, macro_def: &'ast MacroDef) {
walk_macro_def(self, macro_def)
}
- fn visit_vis(&mut self, vis: &Visibility) {
+ fn visit_vis(&mut self, vis: &'ast Visibility) {
walk_vis(self, vis)
}
- fn visit_fn_ret_ty(&mut self, ret_ty: &FunctionRetTy) {
+ fn visit_fn_ret_ty(&mut self, ret_ty: &'ast FunctionRetTy) {
walk_fn_ret_ty(self, ret_ty)
}
}
}
}
-pub fn walk_opt_name<V: Visitor>(visitor: &mut V, span: Span, opt_name: Option<Name>) {
+pub fn walk_opt_name<'a, V: Visitor<'a>>(visitor: &mut V, span: Span, opt_name: Option<Name>) {
if let Some(name) = opt_name {
visitor.visit_name(span, name);
}
}
-pub fn walk_opt_ident<V: Visitor>(visitor: &mut V, span: Span, opt_ident: Option<Ident>) {
+pub fn walk_opt_ident<'a, V: Visitor<'a>>(visitor: &mut V, span: Span, opt_ident: Option<Ident>) {
if let Some(ident) = opt_ident {
visitor.visit_ident(span, ident);
}
}
-pub fn walk_opt_sp_ident<V: Visitor>(visitor: &mut V, opt_sp_ident: &Option<Spanned<Ident>>) {
+pub fn walk_opt_sp_ident<'a, V: Visitor<'a>>(visitor: &mut V,
+ opt_sp_ident: &Option<Spanned<Ident>>) {
if let Some(ref sp_ident) = *opt_sp_ident {
visitor.visit_ident(sp_ident.span, sp_ident.node);
}
}
-pub fn walk_ident<V: Visitor>(visitor: &mut V, span: Span, ident: Ident) {
+pub fn walk_ident<'a, V: Visitor<'a>>(visitor: &mut V, span: Span, ident: Ident) {
visitor.visit_name(span, ident.name);
}
-pub fn walk_crate<V: Visitor>(visitor: &mut V, krate: &Crate) {
+pub fn walk_crate<'a, V: Visitor<'a>>(visitor: &mut V, krate: &'a Crate) {
visitor.visit_mod(&krate.module, krate.span, CRATE_NODE_ID);
walk_list!(visitor, visit_attribute, &krate.attrs);
walk_list!(visitor, visit_macro_def, &krate.exported_macros);
}
-pub fn walk_macro_def<V: Visitor>(visitor: &mut V, macro_def: &MacroDef) {
+pub fn walk_macro_def<'a, V: Visitor<'a>>(visitor: &mut V, macro_def: &'a MacroDef) {
visitor.visit_ident(macro_def.span, macro_def.ident);
walk_opt_ident(visitor, macro_def.span, macro_def.imported_from);
walk_list!(visitor, visit_attribute, ¯o_def.attrs);
}
-pub fn walk_mod<V: Visitor>(visitor: &mut V, module: &Mod) {
+pub fn walk_mod<'a, V: Visitor<'a>>(visitor: &mut V, module: &'a Mod) {
walk_list!(visitor, visit_item, &module.items);
}
-pub fn walk_local<V: Visitor>(visitor: &mut V, local: &Local) {
+pub fn walk_local<'a, V: Visitor<'a>>(visitor: &mut V, local: &'a Local) {
for attr in local.attrs.iter() {
visitor.visit_attribute(attr);
}
walk_list!(visitor, visit_expr, &local.init);
}
-pub fn walk_lifetime<V: Visitor>(visitor: &mut V, lifetime: &Lifetime) {
+pub fn walk_lifetime<'a, V: Visitor<'a>>(visitor: &mut V, lifetime: &'a Lifetime) {
visitor.visit_name(lifetime.span, lifetime.name);
}
-pub fn walk_lifetime_def<V: Visitor>(visitor: &mut V, lifetime_def: &LifetimeDef) {
+pub fn walk_lifetime_def<'a, V: Visitor<'a>>(visitor: &mut V, lifetime_def: &'a LifetimeDef) {
visitor.visit_lifetime(&lifetime_def.lifetime);
walk_list!(visitor, visit_lifetime, &lifetime_def.bounds);
walk_list!(visitor, visit_attribute, &*lifetime_def.attrs);
}
-pub fn walk_poly_trait_ref<V>(visitor: &mut V, trait_ref: &PolyTraitRef, _: &TraitBoundModifier)
- where V: Visitor,
+pub fn walk_poly_trait_ref<'a, V>(visitor: &mut V,
+ trait_ref: &'a PolyTraitRef,
+ _: &TraitBoundModifier)
+ where V: Visitor<'a>,
{
walk_list!(visitor, visit_lifetime_def, &trait_ref.bound_lifetimes);
visitor.visit_trait_ref(&trait_ref.trait_ref);
}
-pub fn walk_trait_ref<V: Visitor>(visitor: &mut V, trait_ref: &TraitRef) {
+pub fn walk_trait_ref<'a, V: Visitor<'a>>(visitor: &mut V, trait_ref: &'a TraitRef) {
visitor.visit_path(&trait_ref.path, trait_ref.ref_id)
}
-pub fn walk_item<V: Visitor>(visitor: &mut V, item: &Item) {
+pub fn walk_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a Item) {
visitor.visit_vis(&item.vis);
visitor.visit_ident(item.span, item.ident);
match item.node {
walk_list!(visitor, visit_attribute, &item.attrs);
}
-pub fn walk_enum_def<V: Visitor>(visitor: &mut V,
- enum_definition: &EnumDef,
- generics: &Generics,
+pub fn walk_enum_def<'a, V: Visitor<'a>>(visitor: &mut V,
+ enum_definition: &'a EnumDef,
+ generics: &'a Generics,
item_id: NodeId) {
walk_list!(visitor, visit_variant, &enum_definition.variants, generics, item_id);
}
-pub fn walk_variant<V>(visitor: &mut V, variant: &Variant, generics: &Generics, item_id: NodeId)
- where V: Visitor,
+pub fn walk_variant<'a, V>(visitor: &mut V,
+ variant: &'a Variant,
+ generics: &'a Generics,
+ item_id: NodeId)
+ where V: Visitor<'a>,
{
visitor.visit_ident(variant.span, variant.node.name);
visitor.visit_variant_data(&variant.node.data, variant.node.name,
walk_list!(visitor, visit_attribute, &variant.node.attrs);
}
-pub fn walk_ty<V: Visitor>(visitor: &mut V, typ: &Ty) {
+pub fn walk_ty<'a, V: Visitor<'a>>(visitor: &mut V, typ: &'a Ty) {
match typ.node {
TyKind::Slice(ref ty) | TyKind::Paren(ref ty) => {
visitor.visit_ty(ty)
}
}
-pub fn walk_path<V: Visitor>(visitor: &mut V, path: &Path) {
+pub fn walk_path<'a, V: Visitor<'a>>(visitor: &mut V, path: &'a Path) {
for segment in &path.segments {
visitor.visit_path_segment(path.span, segment);
}
}
-pub fn walk_path_list_item<V: Visitor>(visitor: &mut V, _prefix: &Path, item: &PathListItem) {
+pub fn walk_path_list_item<'a, V: Visitor<'a>>(visitor: &mut V,
+ _prefix: &Path,
+ item: &'a PathListItem) {
visitor.visit_ident(item.span, item.node.name);
walk_opt_ident(visitor, item.span, item.node.rename);
}
-pub fn walk_path_segment<V: Visitor>(visitor: &mut V, path_span: Span, segment: &PathSegment) {
+pub fn walk_path_segment<'a, V: Visitor<'a>>(visitor: &mut V,
+ path_span: Span,
+ segment: &'a PathSegment) {
visitor.visit_ident(path_span, segment.identifier);
visitor.visit_path_parameters(path_span, &segment.parameters);
}
-pub fn walk_path_parameters<V>(visitor: &mut V, _path_span: Span, path_parameters: &PathParameters)
- where V: Visitor,
+pub fn walk_path_parameters<'a, V>(visitor: &mut V,
+ _path_span: Span,
+ path_parameters: &'a PathParameters)
+ where V: Visitor<'a>,
{
match *path_parameters {
PathParameters::AngleBracketed(ref data) => {
}
}
-pub fn walk_assoc_type_binding<V: Visitor>(visitor: &mut V, type_binding: &TypeBinding) {
+pub fn walk_assoc_type_binding<'a, V: Visitor<'a>>(visitor: &mut V,
+ type_binding: &'a TypeBinding) {
visitor.visit_ident(type_binding.span, type_binding.ident);
visitor.visit_ty(&type_binding.ty);
}
-pub fn walk_pat<V: Visitor>(visitor: &mut V, pattern: &Pat) {
+pub fn walk_pat<'a, V: Visitor<'a>>(visitor: &mut V, pattern: &'a Pat) {
match pattern.node {
PatKind::TupleStruct(ref path, ref children, _) => {
visitor.visit_path(path, pattern.id);
}
}
-pub fn walk_foreign_item<V: Visitor>(visitor: &mut V, foreign_item: &ForeignItem) {
+pub fn walk_foreign_item<'a, V: Visitor<'a>>(visitor: &mut V, foreign_item: &'a ForeignItem) {
visitor.visit_vis(&foreign_item.vis);
visitor.visit_ident(foreign_item.span, foreign_item.ident);
walk_list!(visitor, visit_attribute, &foreign_item.attrs);
}
-pub fn walk_ty_param_bound<V: Visitor>(visitor: &mut V, bound: &TyParamBound) {
+pub fn walk_ty_param_bound<'a, V: Visitor<'a>>(visitor: &mut V, bound: &'a TyParamBound) {
match *bound {
TraitTyParamBound(ref typ, ref modifier) => {
visitor.visit_poly_trait_ref(typ, modifier);
}
}
-pub fn walk_generics<V: Visitor>(visitor: &mut V, generics: &Generics) {
+pub fn walk_generics<'a, V: Visitor<'a>>(visitor: &mut V, generics: &'a Generics) {
for param in &generics.ty_params {
visitor.visit_ident(param.span, param.ident);
walk_list!(visitor, visit_ty_param_bound, ¶m.bounds);
}
}
-pub fn walk_fn_ret_ty<V: Visitor>(visitor: &mut V, ret_ty: &FunctionRetTy) {
+pub fn walk_fn_ret_ty<'a, V: Visitor<'a>>(visitor: &mut V, ret_ty: &'a FunctionRetTy) {
if let FunctionRetTy::Ty(ref output_ty) = *ret_ty {
visitor.visit_ty(output_ty)
}
}
-pub fn walk_fn_decl<V: Visitor>(visitor: &mut V, function_declaration: &FnDecl) {
+pub fn walk_fn_decl<'a, V: Visitor<'a>>(visitor: &mut V, function_declaration: &'a FnDecl) {
for argument in &function_declaration.inputs {
visitor.visit_pat(&argument.pat);
visitor.visit_ty(&argument.ty)
visitor.visit_fn_ret_ty(&function_declaration.output)
}
-pub fn walk_fn<V>(visitor: &mut V, kind: FnKind, declaration: &FnDecl, _span: Span)
- where V: Visitor,
+pub fn walk_fn<'a, V>(visitor: &mut V, kind: FnKind<'a>, declaration: &'a FnDecl, _span: Span)
+ where V: Visitor<'a>,
{
match kind {
FnKind::ItemFn(_, generics, _, _, _, _, body) => {
}
}
-pub fn walk_trait_item<V: Visitor>(visitor: &mut V, trait_item: &TraitItem) {
+pub fn walk_trait_item<'a, V: Visitor<'a>>(visitor: &mut V, trait_item: &'a TraitItem) {
visitor.visit_ident(trait_item.span, trait_item.ident);
walk_list!(visitor, visit_attribute, &trait_item.attrs);
match trait_item.node {
}
}
-pub fn walk_impl_item<V: Visitor>(visitor: &mut V, impl_item: &ImplItem) {
+pub fn walk_impl_item<'a, V: Visitor<'a>>(visitor: &mut V, impl_item: &'a ImplItem) {
visitor.visit_vis(&impl_item.vis);
visitor.visit_ident(impl_item.span, impl_item.ident);
walk_list!(visitor, visit_attribute, &impl_item.attrs);
}
}
-pub fn walk_struct_def<V: Visitor>(visitor: &mut V, struct_definition: &VariantData) {
+pub fn walk_struct_def<'a, V: Visitor<'a>>(visitor: &mut V, struct_definition: &'a VariantData) {
walk_list!(visitor, visit_struct_field, struct_definition.fields());
}
-pub fn walk_struct_field<V: Visitor>(visitor: &mut V, struct_field: &StructField) {
+pub fn walk_struct_field<'a, V: Visitor<'a>>(visitor: &mut V, struct_field: &'a StructField) {
visitor.visit_vis(&struct_field.vis);
walk_opt_ident(visitor, struct_field.span, struct_field.ident);
visitor.visit_ty(&struct_field.ty);
walk_list!(visitor, visit_attribute, &struct_field.attrs);
}
-pub fn walk_block<V: Visitor>(visitor: &mut V, block: &Block) {
+pub fn walk_block<'a, V: Visitor<'a>>(visitor: &mut V, block: &'a Block) {
walk_list!(visitor, visit_stmt, &block.stmts);
}
-pub fn walk_stmt<V: Visitor>(visitor: &mut V, statement: &Stmt) {
+pub fn walk_stmt<'a, V: Visitor<'a>>(visitor: &mut V, statement: &'a Stmt) {
match statement.node {
StmtKind::Local(ref local) => visitor.visit_local(local),
StmtKind::Item(ref item) => visitor.visit_item(item),
}
}
-pub fn walk_mac<V: Visitor>(_: &mut V, _: &Mac) {
+pub fn walk_mac<'a, V: Visitor<'a>>(_: &mut V, _: &Mac) {
// Empty!
}
-pub fn walk_expr<V: Visitor>(visitor: &mut V, expression: &Expr) {
+pub fn walk_expr<'a, V: Visitor<'a>>(visitor: &mut V, expression: &'a Expr) {
for attr in expression.attrs.iter() {
visitor.visit_attribute(attr);
}
visitor.visit_expr_post(expression)
}
-pub fn walk_arm<V: Visitor>(visitor: &mut V, arm: &Arm) {
+pub fn walk_arm<'a, V: Visitor<'a>>(visitor: &mut V, arm: &'a Arm) {
walk_list!(visitor, visit_pat, &arm.pats);
walk_list!(visitor, visit_expr, &arm.guard);
visitor.visit_expr(&arm.body);
walk_list!(visitor, visit_attribute, &arm.attrs);
}
-pub fn walk_vis<V: Visitor>(visitor: &mut V, vis: &Visibility) {
+pub fn walk_vis<'a, V: Visitor<'a>>(visitor: &mut V, vis: &'a Visibility) {
if let Visibility::Restricted { ref path, id } = *vis {
visitor.visit_path(path, id);
}
struct MarkAttrs<'a>(&'a [ast::Name]);
-impl<'a> Visitor for MarkAttrs<'a> {
+impl<'a> Visitor<'a> for MarkAttrs<'a> {
fn visit_attribute(&mut self, attr: &Attribute) {
if self.0.contains(&attr.name()) {
mark_used(attr);
res
}
}
-
types: Vec<P<ast::Ty>>,
}
- impl<'a, 'b> visit::Visitor for Visitor<'a, 'b> {
- fn visit_ty(&mut self, ty: &ast::Ty) {
+ impl<'a, 'b> visit::Visitor<'a> for Visitor<'a, 'b> {
+ fn visit_ty(&mut self, ty: &'a ast::Ty) {
match ty.node {
ast::TyKind::Path(_, ref path) if !path.global => {
if let Some(segment) = path.segments.first() {
let ecfg = ExpansionConfig::default("proc_macro".to_string());
let mut cx = ExtCtxt::new(sess, ecfg, resolver);
- let mut collect = CollectCustomDerives {
- derives: Vec::new(),
- in_root: true,
- handler: handler,
- is_proc_macro_crate: is_proc_macro_crate,
- is_test_crate: is_test_crate,
+ let derives = {
+ let mut collect = CollectCustomDerives {
+ derives: Vec::new(),
+ in_root: true,
+ handler: handler,
+ is_proc_macro_crate: is_proc_macro_crate,
+ is_test_crate: is_test_crate,
+ };
+ visit::walk_crate(&mut collect, &krate);
+ collect.derives
};
- visit::walk_crate(&mut collect, &krate);
if !is_proc_macro_crate {
return krate
return krate;
}
- krate.module.items.push(mk_registrar(&mut cx, &collect.derives));
+ krate.module.items.push(mk_registrar(&mut cx, &derives));
if krate.exported_macros.len() > 0 {
handler.err("cannot export macro_rules! macros from a `proc-macro` \
}
}
-impl<'a> Visitor for CollectCustomDerives<'a> {
- fn visit_item(&mut self, item: &ast::Item) {
+impl<'a> Visitor<'a> for CollectCustomDerives<'a> {
+ fn visit_item(&mut self, item: &'a ast::Item) {
// First up, make sure we're checking a bare function. If we're not then
// we're just not interested in this item.
//
visit::walk_item(self, item);
}
- fn visit_mod(&mut self, m: &ast::Mod, _s: Span, id: NodeId) {
+ fn visit_mod(&mut self, m: &'a ast::Mod, _s: Span, id: NodeId) {
let mut prev_in_root = self.in_root;
if id != ast::CRATE_NODE_ID {
prev_in_root = mem::replace(&mut self.in_root, false);
extern "C" const char*
LLVMRustArchiveChildName(LLVMRustArchiveChildConstRef child, size_t *size) {
+#if LLVM_VERSION_GE(4, 0)
+ Expected<StringRef> name_or_err = child->getName();
+ if (!name_or_err) {
+ // rustc_llvm currently doesn't use this error string, but it might be useful
+ // in the future, and in the meantime this tells LLVM that the error was
+ // not ignored and that it shouldn't abort the process.
+ LLVMRustSetLastError(toString(name_or_err.takeError()).c_str());
+ return NULL;
+ }
+#else
ErrorOr<StringRef> name_or_err = child->getName();
if (name_or_err.getError())
return NULL;
+#endif
StringRef name = name_or_err.get();
*size = name.size();
return name.data();
extern "C" const char*
LLVMRustArchiveChildData(LLVMRustArchiveChildRef child, size_t *size) {
StringRef buf;
+#if LLVM_VERSION_GE(4, 0)
+ Expected<StringRef> buf_or_err = child->getBuffer();
+ if (!buf_or_err) {
+ LLVMRustSetLastError(toString(buf_or_err.takeError()).c_str());
+ return NULL;
+ }
+#else
ErrorOr<StringRef> buf_or_err = child->getBuffer();
if (buf_or_err.getError()) {
LLVMRustSetLastError(buf_or_err.getError().message().c_str());
return NULL;
}
+#endif
buf = buf_or_err.get();
*size = buf.size();
return buf.data();
StringRef PassArg = info->getPassArgument();
StringRef PassName = info->getPassName();
if (!PassArg.empty()) {
- printf("%15.*s - %.*s\n", PassArg.size(), PassArg.data(),
- PassName.size(), PassName.data());
+ // These unsigned->signed casts could theoretically overflow, but
+ // realistically never will (and even if it did, the result is
+ // implementation-defined rather than plain UB).
+ printf("%15.*s - %.*s\n", (int)PassArg.size(), PassArg.data(),
+ (int)PassName.size(), PassName.data());
}
#else
if (info->getPassArgument() && *info->getPassArgument()) {
extern "C" bool
LLVMRustLinkInExternalBitcode(LLVMModuleRef dst, char *bc, size_t len) {
Module *Dst = unwrap(dst);
+
std::unique_ptr<MemoryBuffer> buf = MemoryBuffer::getMemBufferCopy(StringRef(bc, len));
+
+#if LLVM_VERSION_GE(4, 0)
+ Expected<std::unique_ptr<Module>> SrcOrError =
+ llvm::getLazyBitcodeModule(buf->getMemBufferRef(), Dst->getContext());
+ if (!SrcOrError) {
+ LLVMRustSetLastError(toString(SrcOrError.takeError()).c_str());
+ return false;
+ }
+
+ auto Src = std::move(*SrcOrError);
+#else
ErrorOr<std::unique_ptr<Module>> Src =
llvm::getLazyBitcodeModule(std::move(buf), Dst->getContext());
if (!Src) {
LLVMRustSetLastError(Src.getError().message().c_str());
return false;
}
+#endif
std::string Err;
raw_string_ostream Stream(Err);
DiagnosticPrinterRawOStream DP(Stream);
-#if LLVM_VERSION_GE(3, 8)
+#if LLVM_VERSION_GE(4, 0)
+ if (Linker::linkModules(*Dst, std::move(Src))) {
+#elif LLVM_VERSION_GE(3, 8)
if (Linker::linkModules(*Dst, std::move(Src.get()))) {
#else
if (Linker::LinkModules(Dst, Src->get(), [&](const DiagnosticInfo &DI) { DI.print(DP); })) {
# If this file is modified, then llvm will be forcibly cleaned and then rebuilt.
# The actual contents of this file do not matter, but to trigger a change on the
# build bots then the contents should be changed so git updates the mtime.
-2016-12-01
+2016-12-06
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Vectorize.h"
-#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm-c/Core.h"
#include "llvm-c/BitReader.h"
#include "llvm-c/ExecutionEngine.h"
#include "llvm/PassManager.h"
#endif
+#if LLVM_VERSION_GE(4, 0)
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
+#else
+#include "llvm/Bitcode/ReaderWriter.h"
+#endif
+
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DIBuilder.h"
}
}
-impl LateLintPass for Pass {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for Pass {
fn check_crate(&mut self, cx: &LateContext, krate: &hir::Crate) {
if !attr::contains_name(&krate.attrs, "crate_okay") {
cx.span_lint(CRATE_NOT_OKAY, krate.span,
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
- reg.register_late_lint_pass(box Pass as LateLintPassObject);
+ reg.register_late_lint_pass(box Pass);
}
}
}
-impl LateLintPass for Pass {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for Pass {
fn check_item(&mut self, cx: &LateContext, it: &hir::Item) {
match &*it.name.as_str() {
"lintme" => cx.span_lint(TEST_LINT, it.span, "item is named 'lintme'"),
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
- reg.register_late_lint_pass(box Pass as LateLintPassObject);
+ reg.register_late_lint_pass(box Pass);
reg.register_lint_group("lint_me", vec![TEST_LINT, PLEASE_LINT]);
}
// Prefix in imports with empty braces should be resolved and checked privacy, stability, etc.
-use foo::{}; //~ ERROR failed to resolve. Maybe a missing `extern crate foo;`?
+use foo::{};
+//~^ ERROR failed to resolve. Maybe a missing `extern crate foo;`?
+//~| NOTE foo
fn main() {}
mod n {}
}
-use m::n::{}; //~ ERROR module `n` is private
+use m::n::{};
+//~^ ERROR module `n` is private
fn main() {}
extern crate lint_stability;
-use lint_stability::UnstableStruct::{}; //~ ERROR use of unstable library feature 'test_feature'
+use lint_stability::UnstableStruct::{};
+//~^ ERROR use of unstable library feature 'test_feature'
use lint_stability::StableStruct::{}; // OK
fn main() {}
use std::mem::*; // shouldn't get errors for not using
// everything imported
+use std::fmt::{};
+//~^ ERROR unused import: `use std::fmt::{};`
// Should get errors for both 'Some' and 'None'
use std::option::Option::{Some, None};
bool runOnFunction(Function &F) override;
- const char *getPassName() const override {
+#if LLVM_VERSION_MAJOR >= 4
+ StringRef
+#else
+ const char *
+#endif
+ getPassName() const override {
return "Some LLVM pass";
}
bool runOnModule(Module &M) override;
- const char *getPassName() const override {
+#if LLVM_VERSION_MAJOR >= 4
+ StringRef
+#else
+ const char *
+#endif
+ getPassName() const override {
return "Some LLVM pass";
}
}
}
-impl LateLintPass for Pass {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for Pass {
fn check_crate(&mut self, cx: &LateContext, krate: &hir::Crate) {
if !attr::contains_name(&krate.attrs, "crate_okay") {
cx.span_lint(CRATE_NOT_OKAY, krate.span,
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
- reg.register_late_lint_pass(box Pass as LateLintPassObject);
+ reg.register_late_lint_pass(box Pass);
}
}
}
-impl LateLintPass for Pass {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for Pass {
fn check_item(&mut self, cx: &LateContext, it: &hir::Item) {
match &*it.name.as_str() {
"lintme" => cx.span_lint(TEST_LINT, it.span, "item is named 'lintme'"),
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
- reg.register_late_lint_pass(box Pass as LateLintPassObject);
+ reg.register_late_lint_pass(box Pass);
reg.register_lint_group("lint_me", vec![TEST_LINT, PLEASE_LINT]);
}
fn get_lints(&self) -> LintArray { lint_array!(REGION_HIERARCHY) }
}
-impl LateLintPass for Pass {
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for Pass {
fn check_fn(&mut self, cx: &LateContext,
- fk: FnKind, _: &hir::FnDecl, expr: &hir::Expr,
- span: Span, node: ast::NodeId)
+ fk: FnKind, _: &hir::FnDecl, expr: &hir::Expr,
+ span: Span, node: ast::NodeId)
{
if let FnKind::Closure(..) = fk { return }
// except according to those terms.
// ignore-emscripten
+// ignore-android
#![feature(libc)]
use std::__rand::{thread_rng, Rng};
use std::thread;
-const REPEATS: usize = 5;
-const MAX_LEN: usize = 32;
-static drop_counts: [AtomicUsize; MAX_LEN] =
+const MAX_LEN: usize = 80;
+
+static DROP_COUNTS: [AtomicUsize; MAX_LEN] = [
// FIXME #5244: AtomicUsize is not Copy.
- [
- AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
- AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
- AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
- AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
- AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
- AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
- AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
- AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
- AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
- AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
- AtomicUsize::new(0), AtomicUsize::new(0),
- ];
-
-static creation_count: AtomicUsize = AtomicUsize::new(0);
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+];
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord)]
-struct DropCounter { x: u32, creation_id: usize }
+struct DropCounter {
+ x: u32,
+ id: usize,
+}
impl Drop for DropCounter {
fn drop(&mut self) {
- drop_counts[self.creation_id].fetch_add(1, Ordering::Relaxed);
+ DROP_COUNTS[self.id].fetch_add(1, Ordering::Relaxed);
}
}
-pub fn main() {
- // len can't go above 64.
- for len in 2..MAX_LEN {
- for _ in 0..REPEATS {
- // reset the count for these new DropCounters, so their
- // IDs start from 0.
- creation_count.store(0, Ordering::Relaxed);
+fn test(input: &[DropCounter]) {
+ let len = input.len();
- let mut rng = thread_rng();
- let main = (0..len).map(|_| {
- DropCounter {
- x: rng.next_u32(),
- creation_id: creation_count.fetch_add(1, Ordering::Relaxed),
- }
- }).collect::<Vec<_>>();
-
- // work out the total number of comparisons required to sort
- // this array...
- let mut count = 0_usize;
- main.clone().sort_by(|a, b| { count += 1; a.cmp(b) });
-
- // ... and then panic on each and every single one.
- for panic_countdown in 0..count {
- // refresh the counters.
- for c in &drop_counts {
- c.store(0, Ordering::Relaxed);
- }
+ // Work out the total number of comparisons required to sort
+ // this array...
+ let mut count = 0usize;
+ input.to_owned().sort_by(|a, b| { count += 1; a.cmp(b) });
- let v = main.clone();
-
- let _ = thread::spawn(move|| {
- let mut v = v;
- let mut panic_countdown = panic_countdown;
- v.sort_by(|a, b| {
- if panic_countdown == 0 {
- panic!()
- }
- panic_countdown -= 1;
- a.cmp(b)
- })
- }).join();
-
- // check that the number of things dropped is exactly
- // what we expect (i.e. the contents of `v`).
- for (i, c) in drop_counts.iter().enumerate().take(len) {
- let count = c.load(Ordering::Relaxed);
- assert!(count == 1,
- "found drop count == {} for i == {}, len == {}",
- count, i, len);
+ // ... and then panic on each and every single one.
+ for panic_countdown in 0..count {
+ // Refresh the counters.
+ for i in 0..len {
+ DROP_COUNTS[i].store(0, Ordering::Relaxed);
+ }
+
+ let v = input.to_owned();
+ let _ = thread::spawn(move || {
+ let mut v = v;
+ let mut panic_countdown = panic_countdown;
+ v.sort_by(|a, b| {
+ if panic_countdown == 0 {
+ panic!();
}
+ panic_countdown -= 1;
+ a.cmp(b)
+ })
+ }).join();
+
+ // Check that the number of things dropped is exactly
+ // what we expect (i.e. the contents of `v`).
+ for (i, c) in DROP_COUNTS.iter().enumerate().take(len) {
+ let count = c.load(Ordering::Relaxed);
+ assert!(count == 1,
+ "found drop count == {} for i == {}, len == {}",
+ count, i, len);
+ }
+ }
+}
+
+fn main() {
+ for len in (1..20).chain(70..MAX_LEN) {
+ // Test on a random array.
+ let mut rng = thread_rng();
+ let input = (0..len).map(|id| {
+ DropCounter {
+ x: rng.next_u32(),
+ id: id,
}
+ }).collect::<Vec<_>>();
+ test(&input);
+
+ // Test on a sorted array with two elements randomly swapped, creating several natural
+ // runs of random lengths. Such arrays have very high chances of hitting all code paths in
+ // the merge procedure.
+ for _ in 0..5 {
+ let mut input = (0..len).map(|i|
+ DropCounter {
+ x: i as u32,
+ id: i,
+ }
+ ).collect::<Vec<_>>();
+
+ let a = rng.gen::<usize>() % len;
+ let b = rng.gen::<usize>() % len;
+ input.swap(a, b);
+
+ test(&input);
}
}
}