}
if env::var_os("SCCACHE_ERROR_LOG").is_some() {
- cfg.env("RUST_LOG", "sccache=debug");
+ cfg.env("RUST_LOG", "sccache=info");
}
// FIXME: we don't actually need to build all LLVM tools and all LLVM
args="$args --env AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID"
args="$args --env AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY"
args="$args --env SCCACHE_ERROR_LOG=/tmp/sccache/sccache.log"
- args="$args --env SCCACHE_LOG_LEVEL=debug"
args="$args --volume $objdir/tmp:/tmp/sccache"
else
mkdir -p $HOME/.cache/sccache
- [loop_break_value](loop-break-value.md)
- [macro_reexport](macro-reexport.md)
- [main](main.md)
+- [manually_drop](manually-drop.md)
- [map_entry_recover_keys](map-entry-recover-keys.md)
- [mpsc_select](mpsc-select.md)
- [n16](n16.md)
#![feature(heap_api)]
#![feature(inclusive_range)]
#![feature(lang_items)]
+#![feature(manually_drop)]
#![feature(nonzero)]
#![feature(pattern)]
#![feature(placement_in)]
// performance than with the 2nd method.
//
// All methods were benchmarked, and the 3rd showed best results. So we chose that one.
- let mut tmp = NoDrop { value: ptr::read(&v[0]) };
+ let mut tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
// Intermediate state of the insertion process is always tracked by `hole`, which
// serves two purposes:
// fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
// initially held exactly once.
let mut hole = InsertionHole {
- src: &mut tmp.value,
+ src: &mut *tmp,
dest: &mut v[1],
};
ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
for i in 2..v.len() {
- if !is_less(&v[i], &tmp.value) {
+ if !is_less(&v[i], &*tmp) {
break;
}
ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
}
}
- // Holds a value, but never drops it.
- #[allow(unions_with_drop_fields)]
- union NoDrop<T> {
- value: T
- }
-
// When dropped, copies from `src` into `dest`.
struct InsertionHole<T> {
src: *mut T,
/// initialize memory previous set to the result of `uninit`.
pub fn uninit<T>() -> T;
- /// Moves a value out of scope without running drop glue.
- pub fn forget<T>(_: T) -> ();
-
/// Reinterprets the bits of a value of one type as another type.
///
/// Both types must have the same size. Neither the original, nor the result,
/// Stopping at the first `true`:
///
/// ```
- /// let a = [1, 2, 3];
+ /// let a = [1, 2, 3, 4];
///
/// let mut iter = a.iter();
///
- /// assert_eq!(iter.position(|&x| x == 2), Some(1));
+ /// assert_eq!(iter.position(|&x| x >= 2), Some(1));
///
/// // we can still use `iter`, as there are more elements.
/// assert_eq!(iter.next(), Some(&3));
+ ///
+ /// // The returned index depends on iterator state
+ /// assert_eq!(iter.position(|&x| x == 4), Some(0));
+ ///
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn forget<T>(t: T) {
- unsafe { intrinsics::forget(t) }
+ ManuallyDrop::new(t);
}
/// Returns the size of a type in bytes.
}
}
+
+/// A wrapper to inhibit the compiler from automatically calling `T`’s destructor.
+///
+/// This wrapper is 0-cost.
+///
+/// # Examples
+///
+/// This wrapper helps with explicitly documenting the drop order dependencies between fields of
+/// the type:
+///
+/// ```rust
+/// # #![feature(manually_drop)]
+/// use std::mem::ManuallyDrop;
+/// struct Peach;
+/// struct Banana;
+/// struct Melon;
+/// struct FruitBox {
+/// // Immediately clear there’s something non-trivial going on with these fields.
+/// peach: ManuallyDrop<Peach>,
+/// melon: Melon, // Field that’s independent of the other two.
+/// banana: ManuallyDrop<Banana>,
+/// }
+///
+/// impl Drop for FruitBox {
+/// fn drop(&mut self) {
+/// unsafe {
+/// // The explicit ordering in which field destructors are run is specified in
+/// // the intuitive location – the destructor of the structure containing the fields.
+/// // Moreover, one can now reorder fields within the struct however much they want.
+/// ManuallyDrop::drop(&mut self.peach);
+/// ManuallyDrop::drop(&mut self.banana);
+/// }
+/// // After destructor for `FruitBox` runs (this function), the destructor for Melon gets
+/// // invoked in the usual manner, as it is not wrapped in `ManuallyDrop`.
+/// }
+/// }
+/// ```
+#[unstable(feature = "manually_drop", issue = "40673")]
+#[allow(unions_with_drop_fields)]
+pub union ManuallyDrop<T>{ value: T }
+
+impl<T> ManuallyDrop<T> {
+ /// Wraps a value to be manually dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # #![feature(manually_drop)]
+ /// use std::mem::ManuallyDrop;
+ /// ManuallyDrop::new(Box::new(()));
+ /// ```
+ #[unstable(feature = "manually_drop", issue = "40673")]
+ #[inline]
+ pub fn new(value: T) -> ManuallyDrop<T> {
+ ManuallyDrop { value: value }
+ }
+
+ /// Extracts the value from the `ManuallyDrop` container.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # #![feature(manually_drop)]
+ /// use std::mem::ManuallyDrop;
+ /// let x = ManuallyDrop::new(Box::new(()));
+ /// let _: Box<()> = ManuallyDrop::into_inner(x);
+ /// ```
+ #[unstable(feature = "manually_drop", issue = "40673")]
+ #[inline]
+ pub fn into_inner(slot: ManuallyDrop<T>) -> T {
+ unsafe {
+ slot.value
+ }
+ }
+
+ /// Manually drops the contained value.
+ ///
+ /// # Safety
+ ///
+ /// This function runs the destructor of the contained value and thus the wrapped value
+ /// now represents uninitialized data. It is up to the user of this method to ensure the
+ /// uninitialized data is not actually used.
+ #[unstable(feature = "manually_drop", issue = "40673")]
+ #[inline]
+ pub unsafe fn drop(slot: &mut ManuallyDrop<T>) {
+ ptr::drop_in_place(&mut slot.value)
+ }
+}
+
+#[unstable(feature = "manually_drop", issue = "40673")]
+impl<T> ::ops::Deref for ManuallyDrop<T> {
+ type Target = T;
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ unsafe {
+ &self.value
+ }
+ }
+}
+
+#[unstable(feature = "manually_drop", issue = "40673")]
+impl<T> ::ops::DerefMut for ManuallyDrop<T> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ unsafe {
+ &mut self.value
+ }
+ }
+}
+
+#[unstable(feature = "manually_drop", issue = "40673")]
+impl<T: ::fmt::Debug> ::fmt::Debug for ManuallyDrop<T> {
+ fn fmt(&self, fmt: &mut ::fmt::Formatter) -> ::fmt::Result {
+ unsafe {
+ fmt.debug_tuple("ManuallyDrop").field(&self.value).finish()
+ }
+ }
+}
use mem;
use ptr;
-/// Holds a value, but never drops it.
-#[allow(unions_with_drop_fields)]
-union NoDrop<T> {
- value: T
-}
-
/// When dropped, copies from `src` into `dest`.
struct CopyOnDrop<T> {
src: *mut T,
// Read the first element into a stack-allocated variable. If a following comparison
// operation panics, `hole` will get dropped and automatically write the element back
// into the slice.
- let mut tmp = NoDrop { value: ptr::read(v.get_unchecked(0)) };
+ let mut tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(0)));
let mut hole = CopyOnDrop {
- src: &mut tmp.value,
+ src: &mut *tmp,
dest: v.get_unchecked_mut(1),
};
ptr::copy_nonoverlapping(v.get_unchecked(1), v.get_unchecked_mut(0), 1);
for i in 2..len {
- if !is_less(v.get_unchecked(i), &tmp.value) {
+ if !is_less(v.get_unchecked(i), &*tmp) {
break;
}
// Read the last element into a stack-allocated variable. If a following comparison
// operation panics, `hole` will get dropped and automatically write the element back
// into the slice.
- let mut tmp = NoDrop { value: ptr::read(v.get_unchecked(len - 1)) };
+ let mut tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(len - 1)));
let mut hole = CopyOnDrop {
- src: &mut tmp.value,
+ src: &mut *tmp,
dest: v.get_unchecked_mut(len - 2),
};
ptr::copy_nonoverlapping(v.get_unchecked(len - 2), v.get_unchecked_mut(len - 1), 1);
for i in (0..len-2).rev() {
- if !is_less(&tmp.value, v.get_unchecked(i)) {
+ if !is_less(&*tmp, v.get_unchecked(i)) {
break;
}
// Read the pivot into a stack-allocated variable for efficiency. If a following comparison
// operation panics, the pivot will be automatically written back into the slice.
- let mut tmp = NoDrop { value: unsafe { ptr::read(pivot) } };
+ let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
let _pivot_guard = CopyOnDrop {
- src: unsafe { &mut tmp.value },
+ src: &mut *tmp,
dest: pivot,
};
- let pivot = unsafe { &tmp.value };
+ let pivot = &*tmp;
// Find the first pair of out-of-order elements.
let mut l = 0;
// Read the pivot into a stack-allocated variable for efficiency. If a following comparison
// operation panics, the pivot will be automatically written back into the slice.
- let mut tmp = NoDrop { value: unsafe { ptr::read(pivot) } };
+ let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
let _pivot_guard = CopyOnDrop {
- src: unsafe { &mut tmp.value },
+ src: &mut *tmp,
dest: pivot,
};
- let pivot = unsafe { &tmp.value };
+ let pivot = &*tmp;
// Now partition the slice.
let mut l = 0;
Some("one of: `address`, `leak`, `memory` or `thread`");
pub const parse_linker_flavor: Option<&'static str> =
Some(::rustc_back::LinkerFlavor::one_of());
+ pub const parse_optimization_fuel: Option<&'static str> =
+ Some("crate=integer");
}
#[allow(dead_code)]
}
true
}
+
+ fn parse_optimization_fuel(slot: &mut Option<(String, u64)>, v: Option<&str>) -> bool {
+ match v {
+ None => false,
+ Some(s) => {
+ let parts = s.split('=').collect::<Vec<_>>();
+ if parts.len() != 2 { return false; }
+ let crate_name = parts[0].to_string();
+ let fuel = parts[1].parse::<u64>();
+ if fuel.is_err() { return false; }
+ *slot = Some((crate_name, fuel.unwrap()));
+ true
+ }
+ }
+ }
}
) }
"Use a sanitizer"),
linker_flavor: Option<LinkerFlavor> = (None, parse_linker_flavor, [UNTRACKED],
"Linker flavor"),
+ fuel: Option<(String, u64)> = (None, parse_optimization_fuel, [TRACKED],
+ "Set the optimization fuel quota for a crate."),
+ print_fuel: Option<String> = (None, parse_opt_string, [TRACKED],
+ "Make Rustc print the total optimization fuel used by a crate."),
}
pub fn default_lib_output() -> CrateType {
impl_dep_tracking_hash_via_hash!(bool);
impl_dep_tracking_hash_via_hash!(usize);
+ impl_dep_tracking_hash_via_hash!(u64);
impl_dep_tracking_hash_via_hash!(String);
impl_dep_tracking_hash_via_hash!(lint::Level);
impl_dep_tracking_hash_via_hash!(Option<bool>);
impl_dep_tracking_hash_via_hash!(Option<usize>);
impl_dep_tracking_hash_via_hash!(Option<String>);
+ impl_dep_tracking_hash_via_hash!(Option<(String, u64)>);
impl_dep_tracking_hash_via_hash!(Option<PanicStrategy>);
impl_dep_tracking_hash_via_hash!(Option<lint::Level>);
impl_dep_tracking_hash_via_hash!(Option<PathBuf>);
impl_dep_tracking_hash_for_sortable_vec_of!((String, lint::Level));
impl_dep_tracking_hash_for_sortable_vec_of!((String, Option<String>,
Option<cstore::NativeLibraryKind>));
+ impl_dep_tracking_hash_for_sortable_vec_of!((String, u64));
impl DepTrackingHash for SearchPaths {
fn hash(&self, hasher: &mut DefaultHasher, _: ErrorOutputType) {
let mut elems: Vec<_> = self
pub code_stats: RefCell<CodeStats>,
next_node_id: Cell<ast::NodeId>,
+
+ /// If -zfuel=crate=n is specified, Some(crate).
+ optimization_fuel_crate: Option<String>,
+ /// If -zfuel=crate=n is specified, initially set to n. Otherwise 0.
+ optimization_fuel_limit: Cell<u64>,
+ /// If `true`, we have run out of fuel and are rejecting all further optimizations.
+ out_of_fuel: Cell<bool>,
+
+ // The next two are public because the driver needs to read them.
+
+ /// If -zprint-fuel=crate, Some(crate).
+ pub print_fuel_crate: Option<String>,
+ /// Initially set to zero; incremented so that we can print the fuel expended by a crate.
+ pub print_fuel: Cell<u64>,
}
pub struct PerfStats {
println!("Total time spent decoding DefPath tables: {}",
duration_to_secs_str(self.perf_stats.decode_def_path_tables_time.get()));
}
+
+ /// Returns `true` if we're allowed to perform an optimization for `crate_name`,
+ /// as controlled by `-Z fuel=crate=n`. Expends and/or records fuel if applicable.
+ pub fn consider_optimizing<T: Fn() -> String>(&self, crate_name: &str, msg: T) -> bool {
+ let mut ret = true;
+ match self.optimization_fuel_crate {
+ Some(ref c) if c == crate_name => {
+ let fuel = self.optimization_fuel_limit.get();
+ ret = fuel != 0;
+ if fuel == 0 && !self.out_of_fuel.get() {
+ println!("optimization-fuel-exhausted: {}", msg());
+ self.out_of_fuel.set(true);
+ } else if fuel > 0 {
+ self.optimization_fuel_limit.set(fuel-1);
+ }
+ }
+ _ => {}
+ }
+ match self.print_fuel_crate {
+ Some(ref c) if c == crate_name=> {
+ self.print_fuel.set(self.print_fuel.get()+1);
+ },
+ _ => {}
+ }
+ ret
+ }
}
pub fn build_session(sopts: config::Options,
}
);
+ let optimization_fuel_crate = sopts.debugging_opts.fuel.as_ref().map(|i| i.0.clone());
+ let optimization_fuel_limit = Cell::new(sopts.debugging_opts.fuel.as_ref()
+ .map(|i| i.1).unwrap_or(0));
+ let print_fuel_crate = sopts.debugging_opts.print_fuel.clone();
+ let print_fuel = Cell::new(0);
+
let sess = Session {
dep_graph: dep_graph.clone(),
target: target_cfg,
decode_def_path_tables_time: Cell::new(Duration::from_secs(0)),
},
code_stats: RefCell::new(CodeStats::new()),
+ optimization_fuel_crate: optimization_fuel_crate,
+ optimization_fuel_limit: optimization_fuel_limit,
+ print_fuel_crate: print_fuel_crate,
+ print_fuel: print_fuel,
+ out_of_fuel: Cell::new(false),
};
init_llvm(&sess);
ast_ty_to_ty_cache: RefCell::new(NodeMap()),
}, f)
}
+
+ pub fn consider_optimizing<T: Fn() -> String>(&self, msg: T) -> bool {
+ let cname = self.crate_name(LOCAL_CRATE).as_str();
+ self.sess.consider_optimizing(&cname, msg)
+ }
}
impl<'gcx: 'tcx, 'tcx> GlobalCtxt<'gcx> {
}
impl<'a, 'gcx, 'tcx> Struct {
- // FIXME(camlorn): reprs need a better representation to deal with multiple reprs on one type.
fn new(dl: &TargetDataLayout, fields: &Vec<&'a Layout>,
repr: &ReprOptions, kind: StructKind,
scapegoat: Ty<'gcx>) -> Result<Struct, LayoutError<'gcx>> {
// Neither do 1-member and 2-member structs.
// In addition, code in trans assume that 2-element structs can become pairs.
// It's easier to just short-circuit here.
- let mut can_optimize = (fields.len() > 2 || StructKind::EnumVariant == kind)
- && ! (repr.c || repr.packed);
-
- // Disable field reordering until we can decide what to do.
- // The odd pattern here avoids a warning about the value never being read.
- if can_optimize { can_optimize = false; }
+ let can_optimize = (fields.len() > 2 || StructKind::EnumVariant == kind)
+ && !(repr.c || repr.packed || repr.linear || repr.simd);
let (optimize, sort_ascending) = match kind {
StructKind::AlwaysSizedUnivariant => (can_optimize, false),
pub packed: bool,
pub simd: bool,
pub int: Option<attr::IntType>,
+ // Internal only for now. If true, don't reorder fields.
+ pub linear: bool,
}
impl_stable_hash_for!(struct ReprOptions {
c,
packed,
simd,
- int
+ int,
+ linear
});
impl ReprOptions {
ret.simd = true;
}
+ // This is here instead of layout because the choice must make it into metadata.
+ ret.linear = !tcx.consider_optimizing(|| format!("Reorder fields of {:?}",
+ tcx.item_path_str(did)));
ret
}
use std::mem;
use std::collections::range::RangeArgument;
use std::collections::Bound::{Excluded, Included, Unbounded};
+use std::mem::ManuallyDrop;
pub unsafe trait Array {
type Element;
- type PartialStorage: Default + Unsize<[ManuallyDrop<Self::Element>]>;
+ type PartialStorage: Unsize<[ManuallyDrop<Self::Element>]>;
const LEN: usize;
}
pub fn new() -> Self {
ArrayVec {
count: 0,
- values: Default::default(),
+ values: unsafe { ::std::mem::uninitialized() },
}
}
/// Panics when the stack vector is full.
pub fn push(&mut self, el: A::Element) {
let arr = &mut self.values as &mut [ManuallyDrop<_>];
- arr[self.count] = ManuallyDrop { value: el };
+ arr[self.count] = ManuallyDrop::new(el);
self.count += 1;
}
let arr = &mut self.values as &mut [ManuallyDrop<_>];
self.count -= 1;
unsafe {
- let value = ptr::read(&arr[self.count]);
- Some(value.value)
+ let value = ptr::read(&*arr[self.count]);
+ Some(value)
}
} else {
None
fn next(&mut self) -> Option<A::Element> {
let arr = &self.store as &[ManuallyDrop<_>];
unsafe {
- self.indices.next().map(|i| ptr::read(&arr[i]).value)
+ self.indices.next().map(|i| ptr::read(&*arr[i]))
}
}
#[inline]
fn next(&mut self) -> Option<A::Element> {
- self.iter.next().map(|elt| unsafe { ptr::read(elt as *const ManuallyDrop<_>).value })
+ self.iter.next().map(|elt| unsafe { ptr::read(&**elt) })
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter_mut()
}
}
-
-// FIXME: This should use repr(transparent) from rust-lang/rfcs#1758.
-#[allow(unions_with_drop_fields)]
-pub union ManuallyDrop<T> {
- value: T,
- #[allow(dead_code)]
- empty: (),
-}
-
-impl<T> ManuallyDrop<T> {
- fn new() -> ManuallyDrop<T> {
- ManuallyDrop {
- empty: ()
- }
- }
-}
-
-impl<T> Default for ManuallyDrop<T> {
- fn default() -> Self {
- ManuallyDrop::new()
- }
-}
#![feature(conservative_impl_trait)]
#![feature(discriminant_value)]
#![feature(specialization)]
+#![feature(manually_drop)]
#![cfg_attr(unix, feature(libc))]
#![cfg_attr(test, feature(test))]
control.make_glob_map = resolve::MakeGlobMap::Yes;
}
+ if sess.print_fuel_crate.is_some() {
+ let old_callback = control.compilation_done.callback;
+ control.compilation_done.callback = box move |state| {
+ old_callback(state);
+ let sess = state.session;
+ println!("Fuel used by {}: {}",
+ sess.print_fuel_crate.as_ref().unwrap(),
+ sess.print_fuel.get());
+ }
+ }
control
}
}
use llvm::{ValueRef};
use abi::{Abi, FnType};
use adt;
-use mir::lvalue::LvalueRef;
+use mir::lvalue::{LvalueRef, Alignment};
use base::*;
use common::*;
use declare;
use std::cmp::Ordering;
use std::iter;
-use mir::lvalue::Alignment;
-
fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
let llvm_name = match name {
"sqrtf32" => "llvm.sqrt.f32",
C_nil(ccx)
}
// Effectively no-ops
- "uninit" | "forget" => {
+ "uninit" => {
C_nil(ccx)
}
"needs_drop" => {
for i in 0..elems.len() {
let val = bcx.extract_value(val, i);
- bcx.store(val, bcx.struct_gep(llresult, i), None);
+ let lval = LvalueRef::new_sized_ty(llresult, ret_ty,
+ Alignment::AbiAligned);
+ let (dest, align) = lval.trans_field_ptr(bcx, i);
+ bcx.store(val, dest, align.to_align());
}
C_nil(ccx)
}
let lvalue = LvalueRef::alloca(bcx, arg_ty, &format!("arg{}", arg_index));
for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
- let dst = bcx.struct_gep(lvalue.llval, i);
+ let (dst, _) = lvalue.trans_field_ptr(bcx, i);
let arg = &mircx.fn_ty.args[idx];
idx += 1;
if common::type_is_fat_ptr(bcx.ccx, tupled_arg_ty) {
"rustc_peek" => (1, vec![param(0)], param(0)),
"init" => (1, Vec::new(), param(0)),
"uninit" => (1, Vec::new(), param(0)),
- "forget" => (1, vec![ param(0) ], tcx.mk_nil()),
"transmute" => (2, vec![ param(0) ], param(1)),
"move_val_init" => {
(1,
);
for token in tokens {
- match token {
+ match token.trim() {
"" => {},
- "should_panic" => { data.should_panic = true; seen_rust_tags = true; },
- "no_run" => { data.no_run = true; seen_rust_tags = true; },
- "ignore" => { data.ignore = true; seen_rust_tags = true; },
- "rust" => { data.rust = true; seen_rust_tags = true; },
- "test_harness" => { data.test_harness = true; seen_rust_tags = true; },
+ "should_panic" => {
+ data.should_panic = true;
+ seen_rust_tags = seen_other_tags == false;
+ }
+ "no_run" => { data.no_run = true; seen_rust_tags = !seen_other_tags; }
+ "ignore" => { data.ignore = true; seen_rust_tags = !seen_other_tags; }
+ "rust" => { data.rust = true; seen_rust_tags = true; }
+ "test_harness" => {
+ data.test_harness = true;
+ seen_rust_tags = !seen_other_tags || seen_rust_tags;
+ }
"compile_fail" if allow_compile_fail => {
data.compile_fail = true;
- seen_rust_tags = true;
+ seen_rust_tags = !seen_other_tags || seen_rust_tags;
data.no_run = true;
}
x if allow_error_code_check && x.starts_with("E") && x.len() == 5 => {
if let Ok(_) = x[1..].parse::<u32>() {
data.error_codes.push(x.to_owned());
- seen_rust_tags = true;
+ seen_rust_tags = !seen_other_tags || seen_rust_tags;
} else {
seen_other_tags = true;
}
t("test_harness", false, false, false, true, true, false, Vec::new());
t("compile_fail", false, true, false, true, false, true, Vec::new());
t("{.no_run .example}", false, true, false, true, false, false, Vec::new());
- t("{.sh .should_panic}", true, false, false, true, false, false, Vec::new());
+ t("{.sh .should_panic}", true, false, false, false, false, false, Vec::new());
t("{.example .rust}", false, false, false, true, false, false, Vec::new());
t("{.test_harness .rust}", false, false, false, true, true, false, Vec::new());
+ t("text, no_run", false, true, false, false, false, false, Vec::new());
+ t("text,no_run", false, true, false, false, false, false, Vec::new());
}
#[test]
/// Arrays of sizes from 0 to 32 (inclusive) implement the following traits if
/// the element type allows it:
///
-/// - [`Clone`][clone] (only if `T: [Copy][copy]`)
+/// - [`Clone`][clone] (only if `T: `[`Copy`][copy])
/// - [`Debug`][debug]
/// - [`IntoIterator`][intoiterator] (implemented for `&[T; N]` and `&mut [T; N]`)
/// - [`PartialEq`][partialeq], [`PartialOrd`][partialord], [`Eq`][eq], [`Ord`][ord]
#![feature(core_intrinsics)]
-use std::intrinsics::{init, forget};
+use std::intrinsics::{init};
// Test that the `forget` and `init` intrinsics are really unsafe
pub fn main() {
let stuff = init::<isize>(); //~ ERROR call to unsafe function requires unsafe
- forget(stuff); //~ ERROR call to unsafe function requires unsafe
}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_name="foo"]
+
+use std::mem::size_of;
+
+// compile-flags: -Z fuel=foo=0
+
+struct S1(u8, u16, u8);
+struct S2(u8, u16, u8);
+
+fn main() {
+ assert_eq!(size_of::<S1>(), 6);
+ assert_eq!(size_of::<S2>(), 6);
+}
+
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_name="foo"]
+
+use std::mem::size_of;
+
+// compile-flags: -Z fuel=foo=1
+
+struct S1(u8, u16, u8);
+struct S2(u8, u16, u8);
+
+fn main() {
+ let optimized = (size_of::<S1>() == 4) as usize
+ +(size_of::<S2>() == 4) as usize;
+ assert_eq!(optimized, 1);
+}
+
+
a([u16; 0], u8), b
}
+struct ReorderedStruct {
+ a: u8,
+ b: u16,
+ c: u8
+}
+
+enum ReorderedEnum {
+ A(u8, u16, u8),
+ B(u8, u16, u8),
+}
+
pub fn main() {
assert_eq!(size_of::<u8>(), 1 as usize);
assert_eq!(size_of::<u32>(), 4 as usize);
assert_eq!(size_of::<e1>(), 8 as usize);
assert_eq!(size_of::<e2>(), 8 as usize);
assert_eq!(size_of::<e3>(), 4 as usize);
+ assert_eq!(size_of::<ReorderedStruct>(), 4);
+ assert_eq!(size_of::<ReorderedEnum>(), 6);
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_name="foo"]
+#![allow(dead_code)]
+
+// compile-flags: -Z print-fuel=foo
+
+struct S1(u8, u16, u8);
+struct S2(u8, u16, u8);
+struct S3(u8, u16, u8);
+
+fn main() {
+}
--- /dev/null
+Fuel used by foo: 3
-print-type-size type: `IndirectNonZero<u32>`: 20 bytes, alignment: 4 bytes
-print-type-size field `.pre`: 1 bytes
-print-type-size padding: 3 bytes
-print-type-size field `.nested`: 12 bytes, alignment: 4 bytes
+print-type-size type: `IndirectNonZero<u32>`: 12 bytes, alignment: 4 bytes
+print-type-size field `.nested`: 8 bytes
print-type-size field `.post`: 2 bytes
-print-type-size end padding: 2 bytes
-print-type-size type: `MyOption<IndirectNonZero<u32>>`: 20 bytes, alignment: 4 bytes
-print-type-size variant `Some`: 20 bytes
-print-type-size field `.0`: 20 bytes
-print-type-size type: `EmbeddedDiscr`: 12 bytes, alignment: 4 bytes
-print-type-size variant `Record`: 10 bytes
-print-type-size field `.pre`: 1 bytes
-print-type-size padding: 3 bytes
-print-type-size field `.val`: 4 bytes, alignment: 4 bytes
-print-type-size field `.post`: 2 bytes
-print-type-size end padding: 2 bytes
-print-type-size type: `NestedNonZero<u32>`: 12 bytes, alignment: 4 bytes
print-type-size field `.pre`: 1 bytes
-print-type-size padding: 3 bytes
-print-type-size field `.val`: 4 bytes, alignment: 4 bytes
+print-type-size end padding: 1 bytes
+print-type-size type: `MyOption<IndirectNonZero<u32>>`: 12 bytes, alignment: 4 bytes
+print-type-size variant `Some`: 12 bytes
+print-type-size field `.0`: 12 bytes
+print-type-size type: `EmbeddedDiscr`: 8 bytes, alignment: 4 bytes
+print-type-size variant `Record`: 7 bytes
+print-type-size field `.val`: 4 bytes
+print-type-size field `.post`: 2 bytes
+print-type-size field `.pre`: 1 bytes
+print-type-size end padding: 1 bytes
+print-type-size type: `NestedNonZero<u32>`: 8 bytes, alignment: 4 bytes
+print-type-size field `.val`: 4 bytes
print-type-size field `.post`: 2 bytes
-print-type-size end padding: 2 bytes
+print-type-size field `.pre`: 1 bytes
+print-type-size end padding: 1 bytes
print-type-size type: `MyOption<core::nonzero::NonZero<u32>>`: 4 bytes, alignment: 4 bytes
print-type-size variant `Some`: 4 bytes
print-type-size field `.0`: 4 bytes
-print-type-size type: `Padded`: 16 bytes, alignment: 4 bytes
+print-type-size type: `Padded`: 12 bytes, alignment: 4 bytes
+print-type-size field `.g`: 4 bytes
+print-type-size field `.h`: 2 bytes
print-type-size field `.a`: 1 bytes
print-type-size field `.b`: 1 bytes
-print-type-size padding: 2 bytes
-print-type-size field `.g`: 4 bytes, alignment: 4 bytes
print-type-size field `.c`: 1 bytes
-print-type-size padding: 1 bytes
-print-type-size field `.h`: 2 bytes, alignment: 2 bytes
print-type-size field `.d`: 1 bytes
-print-type-size end padding: 3 bytes
+print-type-size end padding: 2 bytes
print-type-size type: `Packed`: 10 bytes, alignment: 1 bytes
print-type-size field `.a`: 1 bytes
print-type-size field `.b`: 1 bytes
print-type-size type: `E1`: 12 bytes, alignment: 4 bytes
-print-type-size discriminant: 4 bytes
-print-type-size variant `A`: 5 bytes
-print-type-size field `.0`: 4 bytes
+print-type-size discriminant: 1 bytes
+print-type-size variant `A`: 7 bytes
print-type-size field `.1`: 1 bytes
-print-type-size variant `B`: 8 bytes
-print-type-size field `.0`: 8 bytes
+print-type-size padding: 2 bytes
+print-type-size field `.0`: 4 bytes, alignment: 4 bytes
+print-type-size variant `B`: 11 bytes
+print-type-size padding: 3 bytes
+print-type-size field `.0`: 8 bytes, alignment: 4 bytes
print-type-size type: `E2`: 12 bytes, alignment: 4 bytes
print-type-size discriminant: 1 bytes
print-type-size variant `A`: 7 bytes
print-type-size padding: 3 bytes
print-type-size field `.0`: 8 bytes, alignment: 4 bytes
print-type-size type: `S`: 8 bytes, alignment: 4 bytes
+print-type-size field `.g`: 4 bytes
print-type-size field `.a`: 1 bytes
print-type-size field `.b`: 1 bytes
-print-type-size padding: 2 bytes
-print-type-size field `.g`: 4 bytes, alignment: 4 bytes
+print-type-size end padding: 2 bytes
extern crate toml;
extern crate rustc_serialize;
-use std::collections::{BTreeMap, HashMap};
+use std::collections::BTreeMap;
use std::env;
use std::fs::File;
use std::io::{self, Read, Write};
struct Manifest {
manifest_version: String,
date: String,
- pkg: HashMap<String, Package>,
+ pkg: BTreeMap<String, Package>,
}
#[derive(RustcEncodable)]
struct Package {
version: String,
- target: HashMap<String, Target>,
+ target: BTreeMap<String, Target>,
}
#[derive(RustcEncodable)]
input: PathBuf,
output: PathBuf,
gpg_passphrase: String,
- digests: HashMap<String, String>,
+ digests: BTreeMap<String, String>,
s3_address: String,
date: String,
rust_version: String,
input: input,
output: output,
gpg_passphrase: passphrase,
- digests: HashMap::new(),
+ digests: BTreeMap::new(),
s3_address: s3_address,
date: date,
rust_version: String::new(),
let mut manifest = Manifest {
manifest_version: "2".to_string(),
date: self.date.to_string(),
- pkg: HashMap::new(),
+ pkg: BTreeMap::new(),
};
self.package("rustc", &mut manifest.pkg, HOSTS);
let mut pkg = Package {
version: self.cached_version("rust").to_string(),
- target: HashMap::new(),
+ target: BTreeMap::new(),
};
for host in HOSTS {
let filename = self.filename("rust", host);
fn package(&mut self,
pkgname: &str,
- dst: &mut HashMap<String, Package>,
+ dst: &mut BTreeMap<String, Package>,
targets: &[&str]) {
let targets = targets.iter().map(|name| {
let filename = self.filename(pkgname, name);