_ptr: *mut ArcInner<T>,
}
+impl<T: Sync + Send> Send for Arc<T> { }
+
+impl<T: Sync + Send> Sync for Arc<T> { }
+
struct ArcInner<T> {
strong: atomic::AtomicUint,
weak: atomic::AtomicUint,
use core::kinds::Sized;
use core::mem;
use core::option::Option;
+use core::ptr::OwnedPtr;
use core::raw::TraitObject;
use core::result::Result;
use core::result::Result::{Ok, Err};
/// A type that represents a uniquely-owned value.
#[lang = "owned_box"]
#[unstable = "custom allocators will add an additional type parameter (with default)"]
-pub struct Box<T>(*mut T);
+pub struct Box<T>(OwnedPtr<T>);
#[stable]
impl<T: Default> Default for Box<T> {
use core::mem;
use core::num::{Int, UnsignedInt};
use core::ops;
-use core::ptr;
+use core::ptr::{mod, OwnedPtr};
use core::raw::Slice as RawSlice;
use core::uint;
#[unsafe_no_drop_flag]
#[stable]
pub struct Vec<T> {
- ptr: *mut T,
+ ptr: OwnedPtr<T>,
len: uint,
cap: uint,
}
// non-null value which is fine since we never call deallocate on the ptr
// if cap is 0. The reason for this is because the pointer of a slice
// being NULL would break the null pointer optimization for enums.
- Vec { ptr: EMPTY as *mut T, len: 0, cap: 0 }
+ Vec { ptr: OwnedPtr(EMPTY as *mut T), len: 0, cap: 0 }
}
/// Constructs a new, empty `Vec<T>` with the specified capacity.
#[stable]
pub fn with_capacity(capacity: uint) -> Vec<T> {
if mem::size_of::<T>() == 0 {
- Vec { ptr: EMPTY as *mut T, len: 0, cap: uint::MAX }
+ Vec { ptr: OwnedPtr(EMPTY as *mut T), len: 0, cap: uint::MAX }
} else if capacity == 0 {
Vec::new()
} else {
.expect("capacity overflow");
let ptr = unsafe { allocate(size, mem::min_align_of::<T>()) };
if ptr.is_null() { ::alloc::oom() }
- Vec { ptr: ptr as *mut T, len: 0, cap: capacity }
+ Vec { ptr: OwnedPtr(ptr as *mut T), len: 0, cap: capacity }
}
}
#[unstable = "needs finalization"]
pub unsafe fn from_raw_parts(ptr: *mut T, length: uint,
capacity: uint) -> Vec<T> {
- Vec { ptr: ptr, len: length, cap: capacity }
+ Vec { ptr: OwnedPtr(ptr), len: length, cap: capacity }
}
/// Creates a vector by copying the elements from a raw pointer.
if self.len == 0 {
if self.cap != 0 {
unsafe {
- dealloc(self.ptr, self.cap)
+ dealloc(self.ptr.0, self.cap)
}
self.cap = 0;
}
unsafe {
// Overflow check is unnecessary as the vector is already at
// least this large.
- self.ptr = reallocate(self.ptr as *mut u8,
- self.cap * mem::size_of::<T>(),
- self.len * mem::size_of::<T>(),
- mem::min_align_of::<T>()) as *mut T;
- if self.ptr.is_null() { ::alloc::oom() }
+ self.ptr = OwnedPtr(reallocate(self.ptr.0 as *mut u8,
+ self.cap * mem::size_of::<T>(),
+ self.len * mem::size_of::<T>(),
+ mem::min_align_of::<T>()) as *mut T);
+ if self.ptr.0.is_null() { ::alloc::oom() }
}
self.cap = self.len;
}
pub fn as_mut_slice<'a>(&'a mut self) -> &'a mut [T] {
unsafe {
mem::transmute(RawSlice {
- data: self.ptr as *const T,
+ data: self.ptr.0 as *const T,
len: self.len,
})
}
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn into_iter(self) -> IntoIter<T> {
unsafe {
- let ptr = self.ptr;
+ let ptr = self.ptr.0;
let cap = self.cap;
- let begin = self.ptr as *const T;
+ let begin = self.ptr.0 as *const T;
let end = if mem::size_of::<T>() == 0 {
(ptr as uint + self.len()) as *const T
} else {
let size = max(old_size, 2 * mem::size_of::<T>()) * 2;
if old_size > size { panic!("capacity overflow") }
unsafe {
- self.ptr = alloc_or_realloc(self.ptr, old_size, size);
- if self.ptr.is_null() { ::alloc::oom() }
+ self.ptr = OwnedPtr(alloc_or_realloc(self.ptr.0, old_size, size));
+ if self.ptr.0.is_null() { ::alloc::oom() }
}
self.cap = max(self.cap, 2) * 2;
}
unsafe {
- let end = (self.ptr as *const T).offset(self.len as int) as *mut T;
+ let end = self.ptr.0.offset(self.len as int);
ptr::write(&mut *end, value);
self.len += 1;
}
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn drain<'a>(&'a mut self) -> Drain<'a, T> {
unsafe {
- let begin = self.ptr as *const T;
+ let begin = self.ptr.0 as *const T;
let end = if mem::size_of::<T>() == 0 {
- (self.ptr as uint + self.len()) as *const T
+ (self.ptr.0 as uint + self.len()) as *const T
} else {
- self.ptr.offset(self.len() as int) as *const T
+ self.ptr.0.offset(self.len() as int) as *const T
};
self.set_len(0);
Drain {
let size = capacity.checked_mul(mem::size_of::<T>())
.expect("capacity overflow");
unsafe {
- self.ptr = alloc_or_realloc(self.ptr, self.cap * mem::size_of::<T>(), size);
- if self.ptr.is_null() { ::alloc::oom() }
+ self.ptr = OwnedPtr(alloc_or_realloc(self.ptr.0,
+ self.cap * mem::size_of::<T>(),
+ size));
+ if self.ptr.0.is_null() { ::alloc::oom() }
}
self.cap = capacity;
}
fn as_slice<'a>(&'a self) -> &'a [T] {
unsafe {
mem::transmute(RawSlice {
- data: self.ptr as *const T,
+ data: self.ptr.0 as *const T,
len: self.len
})
}
for x in self.iter() {
ptr::read(x);
}
- dealloc(self.ptr, self.cap)
+ dealloc(self.ptr.0, self.cap)
}
}
}
for _x in self { }
let IntoIter { allocation, cap, ptr: _ptr, end: _end } = self;
mem::forget(self);
- Vec { ptr: allocation, cap: cap, len: 0 }
+ Vec { ptr: OwnedPtr(allocation), cap: cap, len: 0 }
}
}
pub use self::Ordering::*;
use intrinsics;
-use cell::UnsafeCell;
+use cell::{UnsafeCell, RacyCell};
/// A boolean type which can be safely shared between threads.
#[stable]
pub struct AtomicBool {
- v: UnsafeCell<uint>,
+ v: RacyCell<uint>,
}
/// A signed integer type which can be safely shared between threads.
#[stable]
pub struct AtomicInt {
- v: UnsafeCell<int>,
+ v: RacyCell<int>,
}
/// An unsigned integer type which can be safely shared between threads.
#[stable]
pub struct AtomicUint {
- v: UnsafeCell<uint>,
+ v: RacyCell<uint>,
}
/// A raw pointer type which can be safely shared between threads.
#[stable]
pub struct AtomicPtr<T> {
- p: UnsafeCell<uint>,
+ p: RacyCell<uint>,
}
/// Atomic memory orderings
/// An `AtomicBool` initialized to `false`.
#[unstable = "may be renamed, pending conventions for static initalizers"]
pub const INIT_ATOMIC_BOOL: AtomicBool =
- AtomicBool { v: UnsafeCell { value: 0 } };
+ AtomicBool { v: RacyCell(UnsafeCell { value: 0 }) };
/// An `AtomicInt` initialized to `0`.
#[unstable = "may be renamed, pending conventions for static initalizers"]
pub const INIT_ATOMIC_INT: AtomicInt =
- AtomicInt { v: UnsafeCell { value: 0 } };
+ AtomicInt { v: RacyCell(UnsafeCell { value: 0 }) };
/// An `AtomicUint` initialized to `0`.
#[unstable = "may be renamed, pending conventions for static initalizers"]
pub const INIT_ATOMIC_UINT: AtomicUint =
- AtomicUint { v: UnsafeCell { value: 0, } };
+ AtomicUint { v: RacyCell(UnsafeCell { value: 0 }) };
// NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly
const UINT_TRUE: uint = -1;
#[stable]
pub fn new(v: bool) -> AtomicBool {
let val = if v { UINT_TRUE } else { 0 };
- AtomicBool { v: UnsafeCell::new(val) }
+ AtomicBool { v: RacyCell::new(val) }
}
/// Loads a value from the bool.
#[inline]
#[stable]
pub fn new(v: int) -> AtomicInt {
- AtomicInt {v: UnsafeCell::new(v)}
+ AtomicInt {v: RacyCell::new(v)}
}
/// Loads a value from the int.
#[inline]
#[stable]
pub fn new(v: uint) -> AtomicUint {
- AtomicUint { v: UnsafeCell::new(v) }
+ AtomicUint { v: RacyCell::new(v) }
}
/// Loads a value from the uint.
#[inline]
#[stable]
pub fn new(p: *mut T) -> AtomicPtr<T> {
- AtomicPtr { p: UnsafeCell::new(p as uint) }
+ AtomicPtr { p: RacyCell::new(p as uint) }
}
/// Loads a value from the pointer.
use clone::Clone;
use cmp::PartialEq;
use default::Default;
-use kinds::{marker, Copy};
+use kinds::{marker, Copy, Send, Sync};
use ops::{Deref, DerefMut, Drop};
use option::Option;
use option::Option::{None, Some};
#[deprecated = "renamed to into_inner()"]
pub unsafe fn unwrap(self) -> T { self.into_inner() }
}
+
+/// A version of `UnsafeCell` intended for use in concurrent data
+/// structures (for example, you might put it in an `Arc`).
+pub struct RacyCell<T>(pub UnsafeCell<T>);
+
+impl<T> RacyCell<T> {
+ /// Creates a new `RacyCell` containing `value`.
+ pub fn new(value: T) -> RacyCell<T> {
+ RacyCell(UnsafeCell { value: value })
+ }
+
+ /// Returns a raw pointer to the wrapped value. Unsafe because the
+ /// caller is responsible for synchronizing all reads and writes
+ /// made through the returned pointer.
+ pub unsafe fn get(&self) -> *mut T {
+ self.0.get()
+ }
+
+ /// Consumes the cell and returns the wrapped value. Unsafe for the
+ /// same reason as `UnsafeCell::into_inner`: the caller must ensure
+ /// no outstanding aliases to the contents remain.
+ pub unsafe fn into_inner(self) -> T {
+ self.0.into_inner()
+ }
+}
+
+/// A `RacyCell` can be sent to another thread whenever its contents can.
+impl<T:Send> Send for RacyCell<T> { }
+
+impl<T> Sync for RacyCell<T> { } // Oh dear: unconditionally `Sync` for ANY `T`.
+ // The wrapping abstraction must supply the
+ // actual synchronization (e.g. atomics).
use intrinsics;
use option::Option;
use option::Option::{Some, None};
+use kinds::{Send, Sync};
use cmp::{PartialEq, Eq, Ord, PartialOrd, Equiv};
use cmp::Ordering;
#[inline]
fn ge(&self, other: &*mut T) -> bool { *self >= *other }
}
+
+/// A wrapper around a raw `*mut T` that indicates that the possessor
+/// of this wrapper owns the referent. This in turn implies that the
+/// `OwnedPtr<T>` is `Send`/`Sync` if `T` is `Send`/`Sync`, unlike a
+/// raw `*mut T` (which conveys no particular ownership semantics).
+/// Useful for building abstractions like `Vec<T>` or `Box<T>`, which
+/// internally use raw pointers to manage the memory that they own.
+pub struct OwnedPtr<T>(pub *mut T);
+
+/// `OwnedPtr` pointers are `Send` if `T` is `Send` because the data they
+/// reference is unaliased. Note that this aliasing invariant is
+/// unenforced by the type system; the abstraction using the
+/// `OwnedPtr` must enforce it.
+impl<T:Send> Send for OwnedPtr<T> { }
+
+/// `OwnedPtr` pointers are `Sync` if `T` is `Sync` because the data they
+/// reference is unaliased. Note that this aliasing invariant is
+/// unenforced by the type system; the abstraction using the
+/// `OwnedPtr` must enforce it.
+impl<T:Sync> Sync for OwnedPtr<T> { }
+
+impl<T> OwnedPtr<T> {
+ /// Returns a null OwnedPtr.
+ // NOTE(review): relies on the `RawPtr` trait being in scope in this
+ // module (see the surrounding `use` items) — confirm.
+ pub fn null() -> OwnedPtr<T> {
+ OwnedPtr(RawPtr::null())
+ }
+
+ /// Return an (unsafe) pointer into the memory owned by `self`.
+ // Unsafe because `offset` on a raw pointer requires the result to
+ // stay within the same allocation; the caller must guarantee that.
+ pub unsafe fn offset(self, offset: int) -> *mut T {
+ (self.0 as *const T).offset(offset) as *mut T
+ }
+}
extern crate libc;
-use std::c_vec::CVec;
use libc::{c_void, size_t, c_int};
+use std::c_vec::CVec;
+use std::ptr::OwnedPtr;
#[link(name = "miniz", kind = "static")]
extern {
&mut outsz,
flags);
if !res.is_null() {
- Some(CVec::new_with_dtor(res as *mut u8, outsz as uint, move|:| libc::free(res)))
+ let res = OwnedPtr(res);
+ Some(CVec::new_with_dtor(res.0 as *mut u8, outsz as uint, move|:| libc::free(res.0)))
} else {
None
}
&mut outsz,
flags);
if !res.is_null() {
- Some(CVec::new_with_dtor(res as *mut u8, outsz as uint, move|:| libc::free(res)))
+ let res = OwnedPtr(res);
+ Some(CVec::new_with_dtor(res.0 as *mut u8, outsz as uint, move|:| libc::free(res.0)))
} else {
None
}
E0173,
E0174,
E0177,
- E0178
+ E0178,
+ E0179
}
use middle::traits;
use middle::mem_categorization as mc;
use middle::expr_use_visitor as euv;
+use util::common::ErrorReported;
use util::nodemap::NodeSet;
use syntax::ast;
let ty = ty::node_id_to_type(self.tcx, e.id);
let infcx = infer::new_infer_ctxt(self.tcx);
let mut fulfill_cx = traits::FulfillmentContext::new();
- fulfill_cx.register_builtin_bound(self.tcx, ty, ty::BoundSync,
- traits::ObligationCause::dummy());
- let env = ty::empty_parameter_environment();
- if !fulfill_cx.select_all_or_error(&infcx, &env, self.tcx).is_ok() {
- self.tcx.sess.span_err(e.span, "shared static items must have a \
- type which implements Sync");
+ match traits::poly_trait_ref_for_builtin_bound(self.tcx, ty::BoundSync, ty) {
+ Ok(trait_ref) => {
+ let cause = traits::ObligationCause::new(e.span, e.id, traits::SharedStatic);
+ fulfill_cx.register_trait_ref(self.tcx, trait_ref, cause);
+ let env = ty::empty_parameter_environment();
+ match fulfill_cx.select_all_or_error(&infcx, &env, self.tcx) {
+ Ok(()) => { },
+ Err(ref errors) => {
+ traits::report_fulfillment_errors(&infcx, errors);
+ }
+ }
+ }
+ Err(ErrorReported) => { }
}
}
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::{FulfillmentError, FulfillmentErrorCode,
+ ObligationCauseCode, SelectionError,
+ PredicateObligation, OutputTypeParameterMismatch};
+
+use middle::infer::InferCtxt;
+use middle::ty::{mod};
+use syntax::codemap::Span;
+use util::ppaux::{Repr, UserString};
+
+/// Emits one compiler diagnostic per entry in `errors`; thin loop
+/// over `report_fulfillment_error`.
+pub fn report_fulfillment_errors<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+ errors: &Vec<FulfillmentError<'tcx>>) {
+ for error in errors.iter() {
+ report_fulfillment_error(infcx, error);
+ }
+}
+
+/// Dispatches on the fulfillment error code: selection errors are
+/// always reported, while ambiguity only sometimes produces a
+/// diagnostic (see `maybe_report_ambiguity`).
+fn report_fulfillment_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+ error: &FulfillmentError<'tcx>) {
+ match error.code {
+ FulfillmentErrorCode::CodeSelectionError(ref e) => {
+ report_selection_error(infcx, &error.obligation, e);
+ }
+ FulfillmentErrorCode::CodeAmbiguity => {
+ maybe_report_ambiguity(infcx, &error.obligation);
+ }
+ }
+}
+
+/// Reports a single trait-selection failure (`Overflow`,
+/// `Unimplemented`, or `OutputTypeParameterMismatch`) at the
+/// obligation's span, appending a note about the obligation's origin
+/// where applicable (via `note_obligation_cause`).
+pub fn report_selection_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+ obligation: &PredicateObligation<'tcx>,
+ error: &SelectionError<'tcx>)
+{
+ match *error {
+ SelectionError::Overflow => {
+ // We could track the stack here more precisely if we wanted, I imagine.
+ match obligation.trait_ref {
+ ty::Predicate::Trait(ref trait_ref) => {
+ let trait_ref =
+ infcx.resolve_type_vars_if_possible(&**trait_ref);
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "overflow evaluating the trait `{}` for the type `{}`",
+ trait_ref.user_string(infcx.tcx),
+ trait_ref.self_ty().user_string(infcx.tcx))[]);
+ }
+
+ ty::Predicate::Equate(ref predicate) => {
+ let predicate = infcx.resolve_type_vars_if_possible(predicate);
+ // `unwrap_err` relies on this predicate having in fact
+ // failed to unify; the error value is only used to build
+ // the message text.
+ let err = infcx.equality_predicate(obligation.cause.span,
+ &predicate).unwrap_err();
+
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "the requirement `{}` is not satisfied (`{}`)",
+ predicate.user_string(infcx.tcx),
+ ty::type_err_to_str(infcx.tcx, &err)).as_slice());
+ }
+
+ ty::Predicate::TypeOutlives(..) |
+ ty::Predicate::RegionOutlives(..) => {
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!("overflow evaluating lifetime predicate").as_slice());
+ }
+ }
+
+ // Suggest doubling the crate's recursion limit, since overflow
+ // means the limit was hit during selection.
+ let current_limit = infcx.tcx.sess.recursion_limit.get();
+ let suggested_limit = current_limit * 2;
+ infcx.tcx.sess.span_note(
+ obligation.cause.span,
+ format!(
+ "consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate",
+ suggested_limit)[]);
+
+ note_obligation_cause(infcx, obligation);
+ }
+ SelectionError::Unimplemented => {
+ match obligation.trait_ref {
+ ty::Predicate::Trait(ref trait_ref) => {
+ let trait_ref =
+ infcx.resolve_type_vars_if_possible(
+ &**trait_ref);
+ // Suppress the diagnostic when the self type is
+ // already an error type (an earlier error was
+ // reported for it).
+ if !ty::type_is_error(trait_ref.self_ty()) {
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "the trait `{}` is not implemented for the type `{}`",
+ trait_ref.user_string(infcx.tcx),
+ trait_ref.self_ty().user_string(infcx.tcx)).as_slice());
+ note_obligation_cause(infcx, obligation);
+ }
+ }
+
+ ty::Predicate::Equate(ref predicate) => {
+ let predicate = infcx.resolve_type_vars_if_possible(predicate);
+ let err = infcx.equality_predicate(obligation.cause.span,
+ &predicate).unwrap_err();
+
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "the requirement `{}` is not satisfied (`{}`)",
+ predicate.user_string(infcx.tcx),
+ ty::type_err_to_str(infcx.tcx, &err)).as_slice());
+ }
+
+ ty::Predicate::TypeOutlives(..) |
+ ty::Predicate::RegionOutlives(..) => {
+ let predicate = infcx.resolve_type_vars_if_possible(&obligation.trait_ref);
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "the requirement `{}` is not satisfied",
+ predicate.user_string(infcx.tcx)).as_slice());
+ }
+ }
+ }
+ OutputTypeParameterMismatch(ref expected_trait_ref, ref actual_trait_ref, ref e) => {
+ let expected_trait_ref =
+ infcx.resolve_type_vars_if_possible(
+ &**expected_trait_ref);
+ let actual_trait_ref =
+ infcx.resolve_type_vars_if_possible(
+ &**actual_trait_ref);
+ if !ty::type_is_error(actual_trait_ref.self_ty()) {
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "type mismatch: the type `{}` implements the trait `{}`, \
+ but the trait `{}` is required ({})",
+ expected_trait_ref.self_ty().user_string(infcx.tcx),
+ expected_trait_ref.user_string(infcx.tcx),
+ actual_trait_ref.user_string(infcx.tcx),
+ ty::type_err_to_str(infcx.tcx, e)).as_slice());
+ note_obligation_cause(infcx, obligation);
+ }
+ }
+ }
+}
+
+/// Handles `CodeAmbiguity` errors. Only reports a diagnostic when the
+/// ambiguity stems from insufficient type information (uninferred type
+/// variables); true impl ambiguity is left for coherence to report,
+/// and reaching it here is a bug (`span_bug`).
+fn maybe_report_ambiguity<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+ obligation: &PredicateObligation<'tcx>) {
+ // Unable to successfully determine, probably means
+ // insufficient type information, but could mean
+ // ambiguous impls. The latter *ought* to be a
+ // coherence violation, so we don't report it here.
+
+ let trait_ref = match obligation.trait_ref {
+ ty::Predicate::Trait(ref trait_ref) => {
+ infcx.resolve_type_vars_if_possible(&**trait_ref)
+ }
+ _ => {
+ // Only trait predicates are expected to be ambiguous here.
+ infcx.tcx.sess.span_bug(
+ obligation.cause.span,
+ format!("ambiguity from something other than a trait: {}",
+ obligation.trait_ref.repr(infcx.tcx)).as_slice());
+ }
+ };
+ let self_ty = trait_ref.self_ty();
+
+ debug!("maybe_report_ambiguity(trait_ref={}, self_ty={}, obligation={})",
+ trait_ref.repr(infcx.tcx),
+ self_ty.repr(infcx.tcx),
+ obligation.repr(infcx.tcx));
+ let all_types = &trait_ref.substs().types;
+ if all_types.iter().any(|&t| ty::type_is_error(t)) {
+ // Deliberately silent: an error type implies an earlier,
+ // already-reported error.
+ } else if all_types.iter().any(|&t| ty::type_needs_infer(t)) {
+ // This is kind of a hack: it frequently happens that some earlier
+ // error prevents types from being fully inferred, and then we get
+ // a bunch of uninteresting errors saying something like "<generic
+ // #0> doesn't implement Sized". It may even be true that we
+ // could just skip over all checks where the self-ty is an
+ // inference variable, but I was afraid that there might be an
+ // inference variable created, registered as an obligation, and
+ // then never forced by writeback, and hence by skipping here we'd
+ // be ignoring the fact that we don't KNOW the type works
+ // out. Though even that would probably be harmless, given that
+ // we're only talking about builtin traits, which are known to be
+ // inhabited. But in any case I just threw in this check for
+ // has_errors() to be sure that compilation isn't happening
+ // anyway. In that case, why inundate the user.
+ if !infcx.tcx.sess.has_errors() {
+ // Special-case `Sized` for a friendlier message.
+ if infcx.tcx.lang_items.sized_trait()
+ .map_or(false, |sized_id| sized_id == trait_ref.def_id()) {
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "unable to infer enough type information about `{}`; type annotations \
+ required",
+ self_ty.user_string(infcx.tcx)).as_slice());
+ } else {
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "unable to infer enough type information to \
+ locate the impl of the trait `{}` for \
+ the type `{}`; type annotations required",
+ trait_ref.user_string(infcx.tcx),
+ self_ty.user_string(infcx.tcx))[]);
+ note_obligation_cause(infcx, obligation);
+ }
+ }
+ } else if !infcx.tcx.sess.has_errors() {
+ // Ambiguity. Coherence should have reported an error.
+ infcx.tcx.sess.span_bug(
+ obligation.cause.span,
+ format!(
+ "coherence failed to report ambiguity: \
+ cannot locate the impl of the trait `{}` for \
+ the type `{}`",
+ trait_ref.user_string(infcx.tcx),
+ self_ty.user_string(infcx.tcx))[]);
+ }
+}
+
+/// Extracts the trait reference from the obligation's predicate and
+/// forwards to `note_obligation_cause_code`, which emits the "why this
+/// obligation exists" note. Bugs out on non-trait predicates.
+fn note_obligation_cause<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+ obligation: &PredicateObligation<'tcx>)
+{
+ let trait_ref = match obligation.trait_ref {
+ ty::Predicate::Trait(ref trait_ref) => {
+ infcx.resolve_type_vars_if_possible(&**trait_ref)
+ }
+ _ => {
+ infcx.tcx.sess.span_bug(
+ obligation.cause.span,
+ format!("ambiguity from something other than a trait: {}",
+ obligation.trait_ref.repr(infcx.tcx)).as_slice());
+ }
+ };
+
+ note_obligation_cause_code(infcx,
+ &trait_ref,
+ obligation.cause.span,
+ &obligation.cause.code)
+}
+
+/// Emits a `span_note` explaining why `trait_ref` had to hold, keyed
+/// on `cause_code`. For the two `*DerivedObligation` variants it also
+/// recurses on the root cause, producing a chain of notes back to the
+/// original obligation.
+fn note_obligation_cause_code<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
+ trait_ref: &ty::PolyTraitRef<'tcx>,
+ cause_span: Span,
+ cause_code: &ObligationCauseCode<'tcx>)
+{
+ let tcx = infcx.tcx;
+ let trait_name = ty::item_path_str(tcx, trait_ref.def_id());
+ match *cause_code {
+ // Nothing interesting to add beyond the main error.
+ ObligationCauseCode::MiscObligation => { }
+ ObligationCauseCode::ItemObligation(item_def_id) => {
+ let item_name = ty::item_path_str(tcx, item_def_id);
+ tcx.sess.span_note(
+ cause_span,
+ format!(
+ "the trait `{}` must be implemented because it is required by `{}`",
+ trait_name,
+ item_name).as_slice());
+ }
+ ObligationCauseCode::ObjectCastObligation(object_ty) => {
+ tcx.sess.span_note(
+ cause_span,
+ format!(
+ "the trait `{}` must be implemented for the cast \
+ to the object type `{}`",
+ trait_name,
+ infcx.ty_to_string(object_ty)).as_slice());
+ }
+ ObligationCauseCode::RepeatVec => {
+ tcx.sess.span_note(
+ cause_span,
+ "the `Copy` trait is required because the \
+ repeated element will be copied");
+ }
+ ObligationCauseCode::VariableType(_) => {
+ tcx.sess.span_note(
+ cause_span,
+ "all local variables must have a statically known size");
+ }
+ ObligationCauseCode::ReturnType => {
+ tcx.sess.span_note(
+ cause_span,
+ "the return type of a function must have a \
+ statically known size");
+ }
+ ObligationCauseCode::AssignmentLhsSized => {
+ tcx.sess.span_note(
+ cause_span,
+ "the left-hand-side of an assignment must have a statically known size");
+ }
+ ObligationCauseCode::StructInitializerSized => {
+ tcx.sess.span_note(
+ cause_span,
+ "structs must have a statically known size to be initialized");
+ }
+ ObligationCauseCode::ClosureCapture(var_id, closure_span, builtin_bound) => {
+ // Name the builtin trait (e.g. `Send`) via its lang item.
+ let def_id = tcx.lang_items.from_builtin_kind(builtin_bound).unwrap();
+ let trait_name = ty::item_path_str(tcx, def_id);
+ let name = ty::local_var_name_str(tcx, var_id);
+ span_note!(tcx.sess, closure_span,
+ "the closure that captures `{}` requires that all captured variables \
+ implement the trait `{}`",
+ name,
+ trait_name);
+ }
+ ObligationCauseCode::FieldSized => {
+ span_note!(tcx.sess, cause_span,
+ "only the last field of a struct or enum variant \
+ may have a dynamically sized type")
+ }
+ ObligationCauseCode::ObjectSized => {
+ span_note!(tcx.sess, cause_span,
+ "only sized types can be made into objects");
+ }
+ ObligationCauseCode::SharedStatic => {
+ span_note!(tcx.sess, cause_span,
+ "shared static variables must have a type that implements `Sync`");
+ }
+ ObligationCauseCode::BuiltinDerivedObligation(ref root_trait_ref, ref root_cause_code) => {
+ let root_trait_ref =
+ infcx.resolve_type_vars_if_possible(&**root_trait_ref);
+ span_note!(tcx.sess, cause_span,
+ "the type `{}` must implement `{}` because it appears within the type `{}`",
+ trait_ref.self_ty().user_string(infcx.tcx),
+ trait_ref.user_string(infcx.tcx),
+ root_trait_ref.self_ty().user_string(infcx.tcx));
+ // Keep walking up to the root obligation.
+ note_obligation_cause_code(infcx, &root_trait_ref, cause_span, &**root_cause_code);
+ }
+ ObligationCauseCode::ImplDerivedObligation(ref root_trait_ref, ref root_cause_code) => {
+ let root_trait_ref =
+ infcx.resolve_type_vars_if_possible(&**root_trait_ref);
+ span_note!(tcx.sess, cause_span,
+ "the type `{}` must implement `{}` due to the requirements \
+ on the impl of `{}` for the type `{}`",
+ trait_ref.self_ty().user_string(infcx.tcx),
+ trait_ref.user_string(infcx.tcx),
+ root_trait_ref.user_string(infcx.tcx),
+ root_trait_ref.self_ty().user_string(infcx.tcx));
+ // Keep walking up to the root obligation.
+ note_obligation_cause_code(infcx, &root_trait_ref, cause_span, &**root_cause_code);
+ }
+ }
+}
let tcx = selcx.tcx();
match predicate.trait_ref {
ty::Predicate::Trait(ref trait_ref) => {
- let trait_obligation = Obligation { cause: predicate.cause,
+ let trait_obligation = Obligation { cause: predicate.cause.clone(),
recursion_depth: predicate.recursion_depth,
trait_ref: trait_ref.clone() };
match selcx.select(&trait_obligation) {
CodeSelectionError(Unimplemented)));
} else {
let ty::OutlivesPredicate(t_a, r_b) = binder.0;
- register_region_obligation(tcx, t_a, r_b, predicate.cause, region_obligations);
+ register_region_obligation(tcx, t_a, r_b,
+ predicate.cause.clone(),
+ region_obligations);
}
true
}
use syntax::ast;
use syntax::codemap::{Span, DUMMY_SP};
+pub use self::error_reporting::report_fulfillment_errors;
pub use self::fulfill::{FulfillmentContext, RegionObligation};
pub use self::select::SelectionContext;
pub use self::select::SelectionCache;
pub use self::util::poly_trait_ref_for_builtin_bound;
mod coherence;
+mod error_reporting;
mod fulfill;
mod select;
mod util;
pub type TraitObligation<'tcx> = Obligation<'tcx, Rc<ty::PolyTraitRef<'tcx>>>;
/// Why did we incur this obligation? Used for error reporting.
-#[deriving(Copy, Clone)]
+#[deriving(Clone)]
pub struct ObligationCause<'tcx> {
pub span: Span,
pub code: ObligationCauseCode<'tcx>
}
-#[deriving(Copy, Clone)]
+#[deriving(Clone)]
pub enum ObligationCauseCode<'tcx> {
/// Not well classified or should be obvious from span.
MiscObligation,
/// Obligation incurred due to an object cast.
ObjectCastObligation(/* Object type */ Ty<'tcx>),
- /// To implement drop, type must be sendable.
- DropTrait,
-
/// Various cases where expressions must be sized/copy/etc:
AssignmentLhsSized, // L = X implies that L is Sized
StructInitializerSized, // S { ... } must be Sized
// Only Sized types can be made into objects
ObjectSized,
+
+ // static items must have `Sync` type
+ SharedStatic,
+
+ BuiltinDerivedObligation(Rc<ty::PolyTraitRef<'tcx>>, Rc<ObligationCauseCode<'tcx>>),
+
+ ImplDerivedObligation(Rc<ty::PolyTraitRef<'tcx>>, Rc<ObligationCauseCode<'tcx>>),
}
pub type Obligations<'tcx, O> = subst::VecPerParamSpace<Obligation<'tcx, O>>;
use self::EvaluationResult::*;
use super::{PredicateObligation, Obligation, TraitObligation, ObligationCause};
+use super::{ObligationCauseCode, BuiltinDerivedObligation};
use super::{SelectionError, Unimplemented, Overflow, OutputTypeParameterMismatch};
use super::{Selection};
use super::{SelectionResult};
let obligation =
util::predicate_for_builtin_bound(
self.tcx(),
- previous_stack.obligation.cause,
+ previous_stack.obligation.cause.clone(),
bound,
previous_stack.obligation.recursion_depth + 1,
ty);
Ok(substs) => {
let vtable_impl = self.vtable_impl(impl_def_id,
substs,
- obligation.cause,
+ obligation.cause.clone(),
obligation.recursion_depth + 1,
skol_map,
snapshot);
// behavior, ignore user-defined impls here. This will
// go away by the time 1.0 is released.
if !self.tcx().sess.features.borrow().opt_out_copy {
- try!(self.assemble_candidates_from_impls(obligation, &mut candidates));
+ try!(self.assemble_candidates_from_impls(obligation, &mut candidates.vec));
}
try!(self.assemble_builtin_bound_candidates(ty::BoundCopy,
stack,
&mut candidates));
}
+ Some(bound @ ty::BoundSend) |
+ Some(bound @ ty::BoundSync) => {
+ try!(self.assemble_candidates_from_impls(obligation, &mut candidates.vec));
+
+ // No explicit impls were declared for this type, consider the fallback rules.
+ if candidates.vec.is_empty() {
+ try!(self.assemble_builtin_bound_candidates(bound, stack, &mut candidates));
+ }
+ }
+
+ Some(bound @ ty::BoundSized) => {
+ // Sized and Copy are always automatically computed.
+ try!(self.assemble_builtin_bound_candidates(bound, stack, &mut candidates));
+ }
None => {
// For the time being, we ignore user-defined impls for builtin-bounds, other than
// (And unboxed candidates only apply to the Fn/FnMut/etc traits.)
try!(self.assemble_unboxed_closure_candidates(obligation, &mut candidates));
try!(self.assemble_fn_pointer_candidates(obligation, &mut candidates));
- try!(self.assemble_candidates_from_impls(obligation, &mut candidates));
- }
-
- Some(bound) => {
- try!(self.assemble_builtin_bound_candidates(bound, stack, &mut candidates));
+ try!(self.assemble_candidates_from_impls(obligation, &mut candidates.vec));
}
}
/// Search for impls that might apply to `obligation`.
fn assemble_candidates_from_impls(&mut self,
obligation: &TraitObligation<'tcx>,
- candidates: &mut CandidateSet<'tcx>)
+ candidate_vec: &mut Vec<Candidate<'tcx>>)
-> Result<(), SelectionError<'tcx>>
{
let all_impls = self.all_impls(obligation.trait_ref.def_id());
match self.match_impl(impl_def_id, obligation, snapshot,
&skol_map, Rc::new(skol_obligation_trait_ref)) {
Ok(_) => {
- candidates.vec.push(ImplCandidate(impl_def_id));
+ candidate_vec.push(ImplCandidate(impl_def_id));
}
Err(()) => { }
}
}
}
- ty::ty_ptr(ty::mt { ty: referent_ty, .. }) => { // *const T, *mut T
+ ty::ty_ptr(..) => { // *const T, *mut T
match bound {
ty::BoundCopy |
ty::BoundSized => {
ty::BoundSync |
ty::BoundSend => {
- Ok(If(vec![referent_ty]))
+ // sync and send are not implemented for *const, *mut
+ Err(Unimplemented)
}
}
}
ty::BoundSync => {
if
Some(def_id) == tcx.lang_items.no_sync_bound() ||
- Some(def_id) == tcx.lang_items.managed_bound()
- {
- return Err(Unimplemented)
- } else if
+ Some(def_id) == tcx.lang_items.managed_bound() ||
Some(def_id) == tcx.lang_items.unsafe_type()
{
- // FIXME(#13231) -- we currently consider `UnsafeCell<T>`
- // to always be sync. This is allow for types like `Queue`
- // and `Mutex`, where `Queue<T> : Sync` is `T : Send`.
- return Ok(If(Vec::new()));
+ return Err(Unimplemented)
}
}
// where-clause trait-ref could be unified with the obligation
// trait-ref. Repeat that unification now without any
// transactional boundary; it should not fail.
- match self.confirm_poly_trait_refs(obligation.cause,
+ match self.confirm_poly_trait_refs(obligation.cause.clone(),
obligation.trait_ref.clone(),
param.bound.clone()) {
Ok(()) => Ok(param),
nested: Vec<Ty<'tcx>>)
-> VtableBuiltinData<PredicateObligation<'tcx>>
{
+ let derived_cause = self.derived_cause(obligation, BuiltinDerivedObligation);
let obligations = nested.iter().map(|&t| {
util::predicate_for_builtin_bound(
self.tcx(),
- obligation.cause,
+ derived_cause.clone(),
bound,
obligation.recursion_depth + 1,
t)
// as a special case, `Send` requires `'static`
if bound == ty::BoundSend {
obligations.push(Obligation {
- cause: obligation.cause,
+ cause: obligation.cause.clone(),
recursion_depth: obligation.recursion_depth+1,
trait_ref: ty::Binder(ty::OutlivesPredicate(obligation.self_ty(),
ty::ReStatic)).as_predicate(),
let substs = self.rematch_impl(impl_def_id, obligation,
snapshot, &skol_map, Rc::new(skol_obligation_trait_ref));
debug!("confirm_impl_candidate substs={}", substs);
- Ok(self.vtable_impl(impl_def_id, substs, obligation.cause,
+ Ok(self.vtable_impl(impl_def_id, substs, obligation.cause.clone(),
obligation.recursion_depth + 1, skol_map, snapshot))
})
}
substs: substs,
}));
- try!(self.confirm_poly_trait_refs(obligation.cause,
+ try!(self.confirm_poly_trait_refs(obligation.cause.clone(),
obligation.trait_ref.clone(),
trait_ref));
-
Ok(self_ty)
}
closure_def_id.repr(self.tcx()),
trait_ref.repr(self.tcx()));
- self.confirm_poly_trait_refs(obligation.cause,
+ self.confirm_poly_trait_refs(obligation.cause.clone(),
obligation.trait_ref.clone(),
trait_ref)
}
/// back `Ok(T=int)`.
fn match_inherent_impl(&mut self,
impl_def_id: ast::DefId,
- obligation_cause: ObligationCause,
+ obligation_cause: &ObligationCause,
obligation_self_ty: Ty<'tcx>)
-> Result<Substs<'tcx>,()>
{
}
fn match_self_types(&mut self,
- cause: ObligationCause,
+ cause: &ObligationCause,
// The self type provided by the impl/caller-obligation:
provided_self_ty: Ty<'tcx>,
None
}
}
+
+ fn derived_cause(&self,
+ obligation: &TraitObligation<'tcx>,
+ variant: fn(Rc<ty::Binder<ty::TraitRef<'tcx>>>,
+ Rc<ObligationCauseCode<'tcx>>)
+ -> ObligationCauseCode<'tcx>)
+ -> ObligationCause<'tcx>
+ {
+ /*!
+ * Creates a cause for obligations that are derived from
+ * `obligation` by a recursive search (e.g., for a builtin
+ * bound, or eventually a `impl Foo for ..`). If `obligation`
+ * is itself a derived obligation, this is just a clone, but
+ * otherwise we create a "derived obligation" cause so as to
+ * keep track of the original root obligation for error
+ * reporting.
+ */
+
+ if obligation.recursion_depth == 0 {
+ ObligationCause::new(obligation.cause.span,
+ obligation.trait_ref.def_id().node,
+ variant(obligation.trait_ref.clone(),
+ Rc::new(obligation.cause.code.clone())))
+ } else {
+ obligation.cause.clone()
+ }
+ }
}
impl<'tcx> Repr<'tcx> for Candidate<'tcx> {
generic_bounds.repr(tcx));
generic_bounds.predicates.map(|predicate| {
- Obligation { cause: cause,
+ Obligation { cause: cause.clone(),
recursion_depth: recursion_depth,
trait_ref: predicate.clone() }
})
{
fn fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> traits::Obligation<'tcx, O> {
traits::Obligation {
- cause: self.cause,
+ cause: self.cause.clone(),
recursion_depth: self.recursion_depth,
trait_ref: self.trait_ref.fold_with(folder),
}
time_passes: bool,
}
+impl Send for ModuleConfig { }
+
impl ModuleConfig {
fn new(tm: TargetMachineRef, passes: Vec<String>) -> ModuleConfig {
ModuleConfig {
use syntax::ast_util::PostExpansionMethod;
use syntax::parse::token::{mod, special_idents};
-static DW_LANG_RUST: c_uint = 0x9000;
+const DW_LANG_RUST: c_uint = 0x9000;
#[allow(non_upper_case_globals)]
-static DW_TAG_auto_variable: c_uint = 0x100;
+const DW_TAG_auto_variable: c_uint = 0x100;
#[allow(non_upper_case_globals)]
-static DW_TAG_arg_variable: c_uint = 0x101;
+const DW_TAG_arg_variable: c_uint = 0x101;
#[allow(non_upper_case_globals)]
-static DW_ATE_boolean: c_uint = 0x02;
+const DW_ATE_boolean: c_uint = 0x02;
#[allow(non_upper_case_globals)]
-static DW_ATE_float: c_uint = 0x04;
+const DW_ATE_float: c_uint = 0x04;
#[allow(non_upper_case_globals)]
-static DW_ATE_signed: c_uint = 0x05;
+const DW_ATE_signed: c_uint = 0x05;
#[allow(non_upper_case_globals)]
-static DW_ATE_unsigned: c_uint = 0x07;
+const DW_ATE_unsigned: c_uint = 0x07;
#[allow(non_upper_case_globals)]
-static DW_ATE_unsigned_char: c_uint = 0x08;
+const DW_ATE_unsigned_char: c_uint = 0x08;
-static UNKNOWN_LINE_NUMBER: c_uint = 0;
-static UNKNOWN_COLUMN_NUMBER: c_uint = 0;
+const UNKNOWN_LINE_NUMBER: c_uint = 0;
+const UNKNOWN_COLUMN_NUMBER: c_uint = 0;
// ptr::null() doesn't work :(
-static UNKNOWN_FILE_METADATA: DIFile = (0 as DIFile);
-static UNKNOWN_SCOPE_METADATA: DIScope = (0 as DIScope);
+const UNKNOWN_FILE_METADATA: DIFile = (0 as DIFile);
+const UNKNOWN_SCOPE_METADATA: DIScope = (0 as DIScope);
-static FLAGS_NONE: c_uint = 0;
+const FLAGS_NONE: c_uint = 0;
//=-----------------------------------------------------------------------------
// Public Interface of debuginfo module
pub llmod: ModuleRef,
}
+impl Send for ModuleTranslation { }
+impl Sync for ModuleTranslation { }
+
pub struct CrateTranslation {
pub modules: Vec<ModuleTranslation>,
pub metadata_module: ModuleTranslation,
use check::{FnCtxt, structurally_resolved_type};
use middle::subst::{FnSpace};
use middle::traits;
-use middle::traits::{SelectionError, OutputTypeParameterMismatch, Overflow, Unimplemented};
use middle::traits::{Obligation, ObligationCause};
-use middle::traits::{FulfillmentError, CodeSelectionError, CodeAmbiguity};
-use middle::traits::{PredicateObligation};
+use middle::traits::report_fulfillment_errors;
use middle::ty::{mod, Ty};
use middle::infer;
use std::rc::Rc;
use syntax::ast;
use syntax::codemap::Span;
-use util::ppaux::{UserString, Repr, ty_to_string};
+use util::ppaux::{Repr, ty_to_string};
pub fn check_object_cast<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
cast_expr: &ast::Expr,
fcx);
match r {
Ok(()) => { }
- Err(errors) => { report_fulfillment_errors(fcx, &errors); }
- }
-}
-
-pub fn report_fulfillment_errors<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- errors: &Vec<FulfillmentError<'tcx>>) {
- for error in errors.iter() {
- report_fulfillment_error(fcx, error);
- }
-}
-
-pub fn report_fulfillment_error<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- error: &FulfillmentError<'tcx>) {
- match error.code {
- CodeSelectionError(ref e) => {
- report_selection_error(fcx, &error.obligation, e);
- }
- CodeAmbiguity => {
- maybe_report_ambiguity(fcx, &error.obligation);
- }
- }
-}
-
-pub fn report_selection_error<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- obligation: &PredicateObligation<'tcx>,
- error: &SelectionError<'tcx>)
-{
- match *error {
- Overflow => {
- // We could track the stack here more precisely if we wanted, I imagine.
- let predicate =
- fcx.infcx().resolve_type_vars_if_possible(&obligation.trait_ref);
- fcx.tcx().sess.span_err(
- obligation.cause.span,
- format!(
- "overflow evaluating the requirement `{}`",
- predicate.user_string(fcx.tcx())).as_slice());
-
- let current_limit = fcx.tcx().sess.recursion_limit.get();
- let suggested_limit = current_limit * 2;
- fcx.tcx().sess.span_note(
- obligation.cause.span,
- format!(
- "consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate",
- suggested_limit)[]);
-
- note_obligation_cause(fcx, obligation);
- }
- Unimplemented => {
- match obligation.trait_ref {
- ty::Predicate::Trait(ref trait_ref) => {
- let trait_ref = fcx.infcx().resolve_type_vars_if_possible(&**trait_ref);
- if !ty::type_is_error(trait_ref.self_ty()) {
- fcx.tcx().sess.span_err(
- obligation.cause.span,
- format!(
- "the trait `{}` is not implemented for the type `{}`",
- trait_ref.user_string(fcx.tcx()),
- trait_ref.self_ty().user_string(fcx.tcx())).as_slice());
- }
- }
-
- ty::Predicate::Equate(ref predicate) => {
- let predicate = fcx.infcx().resolve_type_vars_if_possible(predicate);
- let err = fcx.infcx().equality_predicate(obligation.cause.span,
- &predicate).unwrap_err();
- fcx.tcx().sess.span_err(
- obligation.cause.span,
- format!(
- "the requirement `{}` is not satisfied (`{}`)",
- predicate.user_string(fcx.tcx()),
- ty::type_err_to_str(fcx.tcx(), &err)).as_slice());
- }
-
- ty::Predicate::RegionOutlives(ref predicate) => {
- let predicate = fcx.infcx().resolve_type_vars_if_possible(predicate);
- let err = fcx.infcx().region_outlives_predicate(obligation.cause.span,
- &predicate).unwrap_err();
- fcx.tcx().sess.span_err(
- obligation.cause.span,
- format!(
- "the requirement `{}` is not satisfied (`{}`)",
- predicate.user_string(fcx.tcx()),
- ty::type_err_to_str(fcx.tcx(), &err)).as_slice());
- }
-
- ty::Predicate::TypeOutlives(ref predicate) => {
- let predicate = fcx.infcx().resolve_type_vars_if_possible(predicate);
- fcx.tcx().sess.span_err(
- obligation.cause.span,
- format!(
- "the requirement `{}` is not satisfied",
- predicate.user_string(fcx.tcx())).as_slice());
- }
- }
-
- note_obligation_cause(fcx, obligation);
- }
- OutputTypeParameterMismatch(ref expected_trait_ref, ref actual_trait_ref, ref e) => {
- let expected_trait_ref =
- fcx.infcx().resolve_type_vars_if_possible(
- &**expected_trait_ref);
- let actual_trait_ref =
- fcx.infcx().resolve_type_vars_if_possible(
- &**actual_trait_ref);
- if !ty::type_is_error(actual_trait_ref.self_ty()) {
- fcx.tcx().sess.span_err(
- obligation.cause.span,
- format!(
- "type mismatch: the type `{}` implements the trait `{}`, \
- but the trait `{}` is required ({})",
- expected_trait_ref.self_ty().user_string(fcx.tcx()),
- expected_trait_ref.user_string(fcx.tcx()),
- actual_trait_ref.user_string(fcx.tcx()),
- ty::type_err_to_str(fcx.tcx(), e)).as_slice());
- note_obligation_cause(fcx, obligation);
- }
- }
- }
-}
-
-pub fn maybe_report_ambiguity<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- obligation: &PredicateObligation<'tcx>) {
- // Unable to successfully determine, probably means
- // insufficient type information, but could mean
- // ambiguous impls. The latter *ought* to be a
- // coherence violation, so we don't report it here.
-
- let trait_ref = match obligation.trait_ref {
- ty::Predicate::Trait(ref trait_ref) => {
- fcx.infcx().resolve_type_vars_if_possible(&**trait_ref)
- }
- _ => {
- fcx.tcx().sess.span_bug(
- obligation.cause.span,
- format!("ambiguity from something other than a trait: {}",
- obligation.trait_ref.repr(fcx.tcx())).as_slice());
- }
- };
- let self_ty = trait_ref.self_ty();
-
- debug!("maybe_report_ambiguity(trait_ref={}, self_ty={}, obligation={})",
- trait_ref.repr(fcx.tcx()),
- self_ty.repr(fcx.tcx()),
- obligation.repr(fcx.tcx()));
- let all_types = &trait_ref.substs().types;
- if all_types.iter().any(|&t| ty::type_is_error(t)) {
- } else if all_types.iter().any(|&t| ty::type_needs_infer(t)) {
- // This is kind of a hack: it frequently happens that some earlier
- // error prevents types from being fully inferred, and then we get
- // a bunch of uninteresting errors saying something like "<generic
- // #0> doesn't implement Sized". It may even be true that we
- // could just skip over all checks where the self-ty is an
- // inference variable, but I was afraid that there might be an
- // inference variable created, registered as an obligation, and
- // then never forced by writeback, and hence by skipping here we'd
- // be ignoring the fact that we don't KNOW the type works
- // out. Though even that would probably be harmless, given that
- // we're only talking about builtin traits, which are known to be
- // inhabited. But in any case I just threw in this check for
- // has_errors() to be sure that compilation isn't happening
- // anyway. In that case, why inundate the user.
- if !fcx.tcx().sess.has_errors() {
- if fcx.ccx.tcx.lang_items.sized_trait()
- .map_or(false, |sized_id| sized_id == trait_ref.def_id()) {
- fcx.tcx().sess.span_err(
- obligation.cause.span,
- format!(
- "unable to infer enough type information about `{}`; type annotations \
- required",
- self_ty.user_string(fcx.tcx()))[]);
- } else {
- fcx.tcx().sess.span_err(
- obligation.cause.span,
- format!(
- "unable to infer enough type information to \
- locate the impl of the trait `{}` for \
- the type `{}`; type annotations required",
- trait_ref.user_string(fcx.tcx()),
- self_ty.user_string(fcx.tcx()))[]);
- note_obligation_cause(fcx, obligation);
- }
- }
- } else if !fcx.tcx().sess.has_errors() {
- // Ambiguity. Coherence should have reported an error.
- fcx.tcx().sess.span_bug(
- obligation.cause.span,
- format!(
- "coherence failed to report ambiguity: \
- cannot locate the impl of the trait `{}` for \
- the type `{}`",
- trait_ref.user_string(fcx.tcx()),
- self_ty.user_string(fcx.tcx()))[]);
+ Err(errors) => { report_fulfillment_errors(fcx.infcx(), &errors); }
}
}
.select_where_possible(fcx.infcx(), &fcx.inh.param_env, fcx)
{
Ok(()) => { }
- Err(errors) => { report_fulfillment_errors(fcx, &errors); }
+ Err(errors) => { report_fulfillment_errors(fcx.infcx(), &errors); }
}
}
.select_new_obligations(fcx.infcx(), &fcx.inh.param_env, fcx)
{
Ok(()) => { }
- Err(errors) => { report_fulfillment_errors(fcx, &errors); }
- }
-}
-
-fn note_obligation_cause<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- obligation: &PredicateObligation<'tcx>) {
- let tcx = fcx.tcx();
- match obligation.cause.code {
- traits::MiscObligation => { }
- traits::ItemObligation(item_def_id) => {
- let item_name = ty::item_path_str(tcx, item_def_id);
- tcx.sess.span_note(
- obligation.cause.span,
- format!(
- "required by `{}`",
- item_name).as_slice());
- }
- traits::ObjectCastObligation(object_ty) => {
- tcx.sess.span_note(
- obligation.cause.span,
- format!(
- "required for the cast to the object type `{}`",
- fcx.infcx().ty_to_string(object_ty)).as_slice());
- }
- traits::RepeatVec => {
- tcx.sess.span_note(
- obligation.cause.span,
- "the `Copy` trait is required because the \
- repeated element will be copied");
- }
- traits::VariableType(_) => {
- tcx.sess.span_note(
- obligation.cause.span,
- "all local variables must have a statically known size");
- }
- traits::ReturnType => {
- tcx.sess.span_note(
- obligation.cause.span,
- "the return type of a function must have a \
- statically known size");
- }
- traits::AssignmentLhsSized => {
- tcx.sess.span_note(
- obligation.cause.span,
- "the left-hand-side of an assignment must have a statically known size");
- }
- traits::StructInitializerSized => {
- tcx.sess.span_note(
- obligation.cause.span,
- "structs must have a statically known size to be initialized");
- }
- traits::DropTrait => {
- span_note!(tcx.sess, obligation.cause.span,
- "cannot implement a destructor on a \
- structure or enumeration that does not satisfy Send");
- span_help!(tcx.sess, obligation.cause.span,
- "use \"#[unsafe_destructor]\" on the implementation \
- to force the compiler to allow this");
- }
- traits::ClosureCapture(var_id, closure_span, builtin_bound) => {
- let def_id = tcx.lang_items.from_builtin_kind(builtin_bound).unwrap();
- let trait_name = ty::item_path_str(tcx, def_id);
- let name = ty::local_var_name_str(tcx, var_id);
- span_note!(tcx.sess, closure_span,
- "the closure that captures `{}` requires that all captured variables \
- implement the trait `{}`",
- name,
- trait_name);
- }
- traits::FieldSized => {
- span_note!(tcx.sess, obligation.cause.span,
- "only the last field of a struct or enum variant \
- may have a dynamically sized type")
- }
- traits::ObjectSized => {
- span_note!(tcx.sess, obligation.cause.span,
- "only sized types can be made into objects");
- }
+ Err(errors) => { report_fulfillment_errors(fcx.infcx(), &errors); }
}
}
match self_ty.sty {
ty::ty_struct(def_id, _) |
ty::ty_enum(def_id, _) => {
- check_struct_safe_for_destructor(fcx, item.span, self_ty, def_id);
+ check_struct_safe_for_destructor(fcx, item.span, def_id);
}
_ => {
// Coherence already reports an error in this case.
let poly_trait_ref = ty::Binder(trait_ref);
let predicates = ty::predicates_for_trait_ref(fcx.tcx(), &poly_trait_ref);
for predicate in predicates.into_iter() {
- fcx.register_predicate(traits::Obligation::new(cause, predicate));
+ fcx.register_predicate(traits::Obligation::new(cause.clone(), predicate));
}
});
}
fn check_struct_safe_for_destructor<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
span: Span,
- self_ty: Ty<'tcx>,
struct_did: ast::DefId) {
let struct_tpt = ty::lookup_item_type(fcx.tcx(), struct_did);
- if !struct_tpt.generics.has_type_params(subst::TypeSpace)
- && !struct_tpt.generics.has_region_params(subst::TypeSpace)
+ if struct_tpt.generics.has_type_params(subst::TypeSpace)
+ || struct_tpt.generics.has_region_params(subst::TypeSpace)
{
- let cause = traits::ObligationCause::new(span, fcx.body_id, traits::DropTrait);
- fcx.register_builtin_bound(self_ty, ty::BoundSend, cause);
- } else {
span_err!(fcx.tcx().sess, span, E0141,
"cannot implement a destructor on a structure \
- with type parameters");
- span_note!(fcx.tcx().sess, span,
- "use \"#[unsafe_destructor]\" on the implementation \
- to force the compiler to allow this");
+ with type parameters");
+ span_note!(fcx.tcx().sess, span,
+ "use \"#[unsafe_destructor]\" on the implementation \
+ to force the compiler to allow this");
}
}
use fmt;
use hash;
-use kinds::marker;
use mem;
use ptr;
use slice::{mod, ImmutableIntSlice};
use str;
use string::String;
-
+use core::kinds::marker;
/// The representation of a C String.
///
owns_buffer_: bool,
}
+impl Send for CString { }
+impl Sync for CString { }
+
impl Clone for CString {
/// Clone this CString into a new, uniquely owned CString. For safety
/// reasons, this is always a deep clone with the memory allocated
use ops::{Deref, DerefMut, Drop};
use option::Option;
use option::Option::{Some, None};
-use ptr::{RawPtr, copy_nonoverlapping_memory, zero_memory};
+use ptr::{OwnedPtr, RawPtr, copy_nonoverlapping_memory, zero_memory};
use ptr;
use rt::heap::{allocate, deallocate};
pub struct RawTable<K, V> {
capacity: uint,
size: uint,
- hashes: *mut u64,
+ hashes: OwnedPtr<u64>,
// Because K/V do not appear directly in any of the types in the struct,
// inform rustc that in fact instances of K and V are reachable from here.
marker: marker::CovariantType<(K,V)>,
return RawTable {
size: 0,
capacity: 0,
- hashes: 0 as *mut u64,
+ hashes: OwnedPtr::null(),
marker: marker::CovariantType,
};
}
RawTable {
capacity: capacity,
size: 0,
- hashes: hashes,
+ hashes: OwnedPtr(hashes),
marker: marker::CovariantType,
}
}
let hashes_size = self.capacity * size_of::<u64>();
let keys_size = self.capacity * size_of::<K>();
- let buffer = self.hashes as *mut u8;
+ let buffer = self.hashes.0 as *mut u8;
let (keys_offset, vals_offset) = calculate_offsets(hashes_size,
keys_size, min_align_of::<K>(),
min_align_of::<V>());
unsafe {
RawBucket {
- hash: self.hashes,
+ hash: self.hashes.0,
key: buffer.offset(keys_offset as int) as *mut K,
val: buffer.offset(vals_offset as int) as *mut V
}
pub fn new(capacity: uint) -> RawTable<K, V> {
unsafe {
let ret = RawTable::new_uninitialized(capacity);
- zero_memory(ret.hashes, capacity);
+ zero_memory(ret.hashes.0, capacity);
ret
}
}
RawBuckets {
raw: self.first_bucket_raw(),
hashes_end: unsafe {
- self.hashes.offset(self.capacity as int)
+ self.hashes.0.offset(self.capacity as int)
},
marker: marker::ContravariantLifetime,
}
#[unsafe_destructor]
impl<K, V> Drop for RawTable<K, V> {
fn drop(&mut self) {
- if self.hashes.is_null() {
+ if self.hashes.0.is_null() {
return;
}
// This is done in reverse because we've likely partially taken
vals_size, min_align_of::<V>());
unsafe {
- deallocate(self.hashes as *mut u8, size, align);
+ deallocate(self.hashes.0 as *mut u8, size, align);
// Remember how everything was allocated out of one buffer
// during initialization? We only need one call to free here.
}
use mem;
use clone::Clone;
+#[deriving(Send, Sync)]
struct Inner {
thread: Thread,
woken: AtomicBool,
use alloc::arc::Arc;
use core::kinds::marker;
use core::mem;
-use core::cell::UnsafeCell;
+use core::cell::{UnsafeCell, RacyCell};
pub use self::select::{Select, Handle};
use self::select::StartResult;
#[unstable]
pub struct Receiver<T> {
inner: UnsafeCell<Flavor<T>>,
- // can't share in an arc
- _marker: marker::NoSync,
}
+// The receiver port can be sent from place to place, so long as it
+// is not used to receive non-sendable things.
+impl<T:Send> Send for Receiver<T> { }
+
/// An iterator over messages on a receiver, this iterator will block
/// whenever `next` is called, waiting for a new message, and `None` will be
/// returned when the corresponding channel has hung up.
#[unstable]
pub struct Sender<T> {
inner: UnsafeCell<Flavor<T>>,
- // can't share in an arc
- _marker: marker::NoSync,
}
+// The send port can be sent from place to place, so long as it
+// is not used to send non-sendable things.
+impl<T:Send> Send for Sender<T> { }
+
/// The sending-half of Rust's synchronous channel type. This half can only be
/// owned by one task, but it can be cloned to send to other tasks.
#[unstable = "this type may be renamed, but it will always exist"]
pub struct SyncSender<T> {
- inner: Arc<UnsafeCell<sync::Packet<T>>>,
+ inner: Arc<RacyCell<sync::Packet<T>>>,
// can't share in an arc
_marker: marker::NoSync,
}
}
enum Flavor<T> {
- Oneshot(Arc<UnsafeCell<oneshot::Packet<T>>>),
- Stream(Arc<UnsafeCell<stream::Packet<T>>>),
- Shared(Arc<UnsafeCell<shared::Packet<T>>>),
- Sync(Arc<UnsafeCell<sync::Packet<T>>>),
+ Oneshot(Arc<RacyCell<oneshot::Packet<T>>>),
+ Stream(Arc<RacyCell<stream::Packet<T>>>),
+ Shared(Arc<RacyCell<shared::Packet<T>>>),
+ Sync(Arc<RacyCell<sync::Packet<T>>>),
}
#[doc(hidden)]
/// ```
#[unstable]
pub fn channel<T: Send>() -> (Sender<T>, Receiver<T>) {
- let a = Arc::new(UnsafeCell::new(oneshot::Packet::new()));
+ let a = Arc::new(RacyCell::new(oneshot::Packet::new()));
(Sender::new(Oneshot(a.clone())), Receiver::new(Oneshot(a)))
}
#[unstable = "this function may be renamed to more accurately reflect the type \
of channel that is is creating"]
pub fn sync_channel<T: Send>(bound: uint) -> (SyncSender<T>, Receiver<T>) {
- let a = Arc::new(UnsafeCell::new(sync::Packet::new(bound)));
+ let a = Arc::new(RacyCell::new(sync::Packet::new(bound)));
(SyncSender::new(a.clone()), Receiver::new(Sync(a)))
}
fn new(inner: Flavor<T>) -> Sender<T> {
Sender {
inner: UnsafeCell::new(inner),
- _marker: marker::NoSync,
}
}
if !(*p).sent() {
return (*p).send(t);
} else {
- let a = Arc::new(UnsafeCell::new(stream::Packet::new()));
+ let a =
+ Arc::new(RacyCell::new(stream::Packet::new()));
match (*p).upgrade(Receiver::new(Stream(a.clone()))) {
oneshot::UpSuccess => {
let ret = (*a.get()).send(t);
fn clone(&self) -> Sender<T> {
let (packet, sleeper, guard) = match *unsafe { self.inner() } {
Oneshot(ref p) => {
- let a = Arc::new(UnsafeCell::new(shared::Packet::new()));
+ let a = Arc::new(RacyCell::new(shared::Packet::new()));
unsafe {
let guard = (*a.get()).postinit_lock();
match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) {
}
}
Stream(ref p) => {
- let a = Arc::new(UnsafeCell::new(shared::Packet::new()));
+ let a = Arc::new(RacyCell::new(shared::Packet::new()));
unsafe {
let guard = (*a.get()).postinit_lock();
match (*p.get()).upgrade(Receiver::new(Shared(a.clone()))) {
////////////////////////////////////////////////////////////////////////////////
impl<T: Send> SyncSender<T> {
- fn new(inner: Arc<UnsafeCell<sync::Packet<T>>>) -> SyncSender<T> {
+ fn new(inner: Arc<RacyCell<sync::Packet<T>>>) -> SyncSender<T> {
SyncSender { inner: inner, _marker: marker::NoSync }
}
impl<T: Send> Receiver<T> {
fn new(inner: Flavor<T>) -> Receiver<T> {
- Receiver { inner: UnsafeCell::new(inner), _marker: marker::NoSync }
+ Receiver { inner: UnsafeCell::new(inner) }
}
/// Blocks waiting for a value on this receiver
tail: UnsafeCell<*mut Node<T>>,
}
+impl<T:Send> Send for Queue<T> { }
+impl<T:Send> Sync for Queue<T> { }
+
impl<T> Node<T> {
unsafe fn new(v: Option<T>) -> *mut Node<T> {
mem::transmute(box Node {
cache_subtractions: AtomicUint,
}
+impl<T: Send> Send for Queue<T> { }
+
+impl<T: Send> Sync for Queue<T> { }
+
impl<T: Send> Node<T> {
fn new() -> *mut Node<T> {
unsafe {
lock: Mutex<State<T>>,
}
+impl<T:Send> Send for Packet<T> { }
+
+impl<T:Send> Sync for Packet<T> { }
+
+#[deriving(Send)]
struct State<T> {
disconnected: bool, // Is the channel disconnected yet?
queue: Queue, // queue of senders waiting to send data
next: *mut Node,
}
+impl Send for Node {}
+
/// A simple ring-buffer
struct Buffer<T> {
buf: Vec<Option<T>>,
data: UnsafeCell<T>,
}
+impl<T:Send> Send for Exclusive<T> { }
+
+impl<T:Send> Sync for Exclusive<T> { }
+
/// An RAII guard returned via `lock`
pub struct ExclusiveGuard<'a, T:'a> {
// FIXME #12808: strange name to try to avoid interfering with
use prelude::*;
-use cell::UnsafeCell;
+use cell::{UnsafeCell, RacyCell};
use kinds::marker;
use sync::{poison, AsMutexGuard};
use sys_common::mutex as sys;
// time, so to ensure that the native mutex is used correctly we box the
// inner lock to give it a constant address.
inner: Box<StaticMutex>,
- data: UnsafeCell<T>,
+ data: RacyCell<T>,
}
+impl<T:Send> Send for Mutex<T> { }
+
+impl<T:Send> Sync for Mutex<T> { }
+
/// The static mutex type is provided to allow for static allocation of mutexes.
///
/// Note that this is a separate type because using a Mutex correctly means that
/// }
/// // lock is unlocked here.
/// ```
+#[deriving(Sync)]
pub struct StaticMutex {
lock: sys::Mutex,
- poison: UnsafeCell<poison::Flag>,
+ poison: RacyCell<poison::Flag>,
}
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// other mutex constants.
pub const MUTEX_INIT: StaticMutex = StaticMutex {
lock: sys::MUTEX_INIT,
- poison: UnsafeCell { value: poison::Flag { failed: false } },
+ poison: RacyCell(UnsafeCell { value: poison::Flag { failed: false } }),
};
impl<T: Send> Mutex<T> {
pub fn new(t: T) -> Mutex<T> {
Mutex {
inner: box MUTEX_INIT,
- data: UnsafeCell::new(t),
+ data: RacyCell::new(t),
}
}
/// // run initialization here
/// });
/// ```
+#[deriving(Sync)]
pub struct Once {
mutex: StaticMutex,
cnt: atomic::AtomicInt,
pub shutdown: UnsafeCell<bool>,
}
+impl<M:Send> Send for Helper<M> { }
+
+impl<M:Send> Sync for Helper<M> { }
+
impl<M: Send> Helper<M> {
/// Lazily boots a helper thread, becoming a no-op if the helper has already
/// been spawned.
/// This is the thinnest cross-platform wrapper around OS mutexes. All usage of
/// this mutex is unsafe and it is recommended to instead use the safe wrapper
/// at the top level of the crate instead of this type.
+#[deriving(Sync)]
pub struct Mutex(imp::Mutex);
/// Constant initializer for statically allocated mutexes.
sa_restorer: *mut libc::c_void,
}
+ impl ::kinds::Send for sigaction { }
+ impl ::kinds::Sync for sigaction { }
+
#[repr(C)]
#[cfg(target_word_size = "32")]
pub struct sigset_t {
sa_resv: [libc::c_int, ..1],
}
+ impl ::kinds::Send for sigaction { }
+ impl ::kinds::Sync for sigaction { }
+
#[repr(C)]
pub struct sigset_t {
__val: [libc::c_ulong, ..32],
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use cell::UnsafeCell;
+use cell::{UnsafeCell, RacyCell};
use sys::sync as ffi;
use sys_common::mutex;
-pub struct Mutex { inner: UnsafeCell<ffi::pthread_mutex_t> }
+#[deriving(Sync)]
+pub struct Mutex { inner: RacyCell<ffi::pthread_mutex_t> }
#[inline]
pub unsafe fn raw(m: &Mutex) -> *mut ffi::pthread_mutex_t {
}
pub const MUTEX_INIT: Mutex = Mutex {
- inner: UnsafeCell { value: ffi::PTHREAD_MUTEX_INITIALIZER },
+ inner: RacyCell(UnsafeCell { value: ffi::PTHREAD_MUTEX_INITIALIZER }),
};
impl Mutex {
// Unix Listener
////////////////////////////////////////////////////////////////////////////////
+#[deriving(Sync)]
pub struct UnixListener {
inner: Inner,
path: CString,
deadline: u64,
}
+#[deriving(Sync)]
struct AcceptorInner {
listener: UnixListener,
reader: FileDesc,
pub static SIGSTKSZ: libc::size_t = 8192;
- pub static SIG_DFL: sighandler_t = 0i as sighandler_t;
+ pub const SIG_DFL: sighandler_t = 0i as sighandler_t;
// This definition is not as accurate as it could be, {si_addr} is
// actually a giant union. Currently we're only interested in that field,
// TCP listeners
////////////////////////////////////////////////////////////////////////////////
+#[deriving(Sync)]
pub struct TcpListener {
pub inner: FileDesc,
}
deadline: u64,
}
+#[deriving(Sync)]
struct AcceptorInner {
listener: TcpListener,
reader: FileDesc,
use any::Any;
use borrow::IntoCow;
use boxed::Box;
-use cell::UnsafeCell;
+use cell::RacyCell;
use clone::Clone;
-use kinds::Send;
+use kinds::{Send, Sync};
use ops::{Drop, FnOnce};
use option::Option::{mod, Some, None};
use result::Result::{Err, Ok};
}
fn spawn_inner<T: Send>(self, f: Thunk<(), T>) -> JoinGuard<T> {
- let my_packet = Arc::new(UnsafeCell::new(None));
+ let my_packet = Arc::new(RacyCell::new(None));
let their_packet = my_packet.clone();
let Builder { name, stack_size, stdout, stderr } = self;
}
}
+#[deriving(Sync)]
struct Inner {
name: Option<String>,
lock: Mutex<bool>, // true when there is a buffered unpark
cvar: Condvar,
}
-#[deriving(Clone)]
+#[deriving(Clone, Sync)]
/// A handle to a thread.
pub struct Thread {
inner: Arc<Inner>,
native: imp::rust_thread,
thread: Thread,
joined: bool,
- packet: Arc<UnsafeCell<Option<Result<T>>>>,
+ packet: Arc<RacyCell<Option<Result<T>>>>,
}
impl<T: Send> JoinGuard<T> {
pub dtor_running: UnsafeCell<bool>, // should be Cell
}
+ impl<T> ::kinds::Sync for Key<T> { }
+
#[doc(hidden)]
impl<T> Key<T> {
pub unsafe fn get(&'static self) -> Option<&'static T> {
pub os: OsStaticKey,
}
+ impl<T> ::kinds::Sync for Key<T> { }
+
struct Value<T: 'static> {
key: &'static Key<T>,
value: T,
mod imp {
use std::cell::UnsafeCell;
- // FIXME: Should be a `Cell`, but that's not `Sync`
+ // SNAP c9f6d69 switch to `Cell`
#[doc(hidden)]
pub struct KeyInner<T> { pub inner: UnsafeCell<*mut T> }
+ #[cfg(not(stage0))] impl<T> ::kinds::Sync for KeyInner<T> { }
+
#[doc(hidden)]
impl<T> KeyInner<T> {
#[doc(hidden)]
pub marker: marker::InvariantType<T>,
}
+ #[cfg(not(stage0))] impl<T> ::kinds::Sync for KeyInner<T> { }
+
#[doc(hidden)]
impl<T> KeyInner<T> {
#[doc(hidden)]
pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
+impl Send for MonitorMsg {}
+
fn run_tests<F>(opts: &TestOpts,
tests: Vec<TestDescAndFn> ,
mut callback: F) -> io::IoResult<()> where
#![allow(dead_code)]
#![allow(unused_unsafe)]
+use std::kinds::Sync;
+
struct Foo {
a: uint,
b: *const ()
}
+impl Sync for Foo {}
+
fn foo<T>(a: T) -> T {
a
}
use std::ptr;
-static a: *const u8 = 0 as *const u8;
+struct TestStruct {
+ x: *const u8
+}
+
+impl Sync for TestStruct {}
+
+static a: TestStruct = TestStruct{x: 0 as *const u8};
pub fn main() {
- assert_eq!(a, ptr::null());
+ assert_eq!(a.x, ptr::null());
}
extern crate libc;
-extern fn foo() {}
+struct TestStruct {
+ x: *const libc::c_void
+}
+impl Sync for TestStruct {}
+
+extern fn foo() {}
const x: extern "C" fn() = foo;
-static y: *const libc::c_void = x as *const libc::c_void;
-const a: &'static int = &10;
-static b: *const int = a as *const int;
+static y: TestStruct = TestStruct { x: x as *const libc::c_void };
pub fn main() {
- assert_eq!(x as *const libc::c_void, y);
- assert_eq!(a as *const int, b);
+ assert_eq!(x as *const libc::c_void, y.x);
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-static TEST_VALUE : *const [int; 2] = 0x1234 as *const [int; 2];
+struct TestStruct {
+ x: *const [int; 2]
+}
+
+impl Sync for TestStruct {}
+
+static TEST_VALUE : TestStruct = TestStruct{x: 0x1234 as *const [int; 2]};
fn main() {}
// except according to those terms.
use std::kinds::marker;
-use std::cell::UnsafeCell;
+use std::cell::{UnsafeCell, RacyCell};
struct MyUnsafe<T> {
- value: UnsafeCell<T>
+ value: RacyCell<T>
}
impl<T> MyUnsafe<T> {
enum UnsafeEnum<T> {
VariantSafe,
- VariantUnsafe(UnsafeCell<T>)
+ VariantUnsafe(RacyCell<T>)
}
static STATIC1: UnsafeEnum<int> = UnsafeEnum::VariantSafe;
-static STATIC2: UnsafeCell<int> = UnsafeCell { value: 1 };
-const CONST: UnsafeCell<int> = UnsafeCell { value: 1 };
+static STATIC2: RacyCell<int> = RacyCell(UnsafeCell { value: 1 });
+const CONST: RacyCell<int> = RacyCell(UnsafeCell { value: 1 });
static STATIC3: MyUnsafe<int> = MyUnsafe{value: CONST};
-static STATIC4: &'static UnsafeCell<int> = &STATIC2;
+static STATIC4: &'static RacyCell<int> = &STATIC2;
struct Wrap<T> {
value: T
}
-static UNSAFE: UnsafeCell<int> = UnsafeCell{value: 1};
-static WRAPPED_UNSAFE: Wrap<&'static UnsafeCell<int>> = Wrap { value: &UNSAFE };
+impl<T: Send> Sync for Wrap<T> {}
+
+static UNSAFE: RacyCell<int> = RacyCell(UnsafeCell{value: 1});
+static WRAPPED_UNSAFE: Wrap<&'static RacyCell<int>> = Wrap { value: &UNSAFE };
fn main() {
let a = &STATIC1;
use std::mem::{replace, swap};
use std::mem;
use std::task;
+ use std::kinds::Send;
pub struct Stuff<T> {
state: state,
payload: Option<T>
}
+ impl<T:Send> Send for packet<T> {}
+
pub fn packet<T:Send>() -> *const packet<T> {
unsafe {
let p: *const packet<T> = mem::transmute(box Stuff{
use std::mem;
pub struct ping(::pipes::send_packet<pong>);
+
+ unsafe impl Send for ping {}
+
pub struct pong(::pipes::send_packet<ping>);
+ unsafe impl Send for pong {}
+
pub fn liberate_ping(p: ping) -> ::pipes::send_packet<pong> {
unsafe {
let _addr : *const ::pipes::send_packet<pong> = match &p {
// This test checks that the `_` type placeholder works
// correctly for enabling type inference.
-static CONSTEXPR: *const int = &413 as *const _;
+struct TestStruct {
+ x: *const int
+}
+
+impl Sync for TestStruct {}
+
+static CONSTEXPR: TestStruct = TestStruct{x: &413 as *const _};
+
pub fn main() {
let x: Vec<_> = range(0u, 5).collect();