$$(CRATE_FULLDEPS_$(1)_T_$(2)_H_$(3)_$(4)) \
$$(TSREQ$(1)_T_$(2)_H_$(3)) \
| $$(TLIB$(1)_T_$(2)_H_$(3))/
- @$$(call E, oxidize: $$(@D)/lib$(4))
+ @$$(call E, rustc: $$(@D)/lib$(4))
$$(call REMOVE_ALL_OLD_GLOB_MATCHES,\
$$(dir $$@)$$(call CFG_LIB_GLOB_$(2),$(4)))
$$(call REMOVE_ALL_OLD_GLOB_MATCHES,\
$$(TLIB$(1)_T_$(2)_H_$(3))/stamp.$$(dep)) \
$$(TSREQ$(1)_T_$(2)_H_$(3)) \
| $$(TBIN$(1)_T_$(4)_H_$(3))/
- @$$(call E, oxidize: $$@)
+ @$$(call E, rustc: $$@)
$$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --cfg $(4)
endef
$(3)/stage$(1)/test/$(4)test-$(2)$$(X_$(2)): \
$$(CRATEFILE_$(4)) \
$$(TESTDEP_$(1)_$(2)_$(3)_$(4))
- @$$(call E, oxidize: $$@)
+ @$$(call E, rustc: $$@)
$$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --test \
-L "$$(RT_OUTPUT_DIR_$(2))" \
-L "$$(LLVM_LIBDIR_$(2))"
syn match rustAssert "\<assert\(\w\)*!" contained
syn match rustFail "\<fail\(\w\)*!" contained
-syn keyword rustKeyword break box continue
-syn keyword rustKeyword extern nextgroup=rustExternCrate,rustObsoleteExternMod skipwhite
+syn keyword rustKeyword break
+syn keyword rustKeyword box nextgroup=rustBoxPlacement skipwhite skipempty
+syn keyword rustKeyword continue
+syn keyword rustKeyword extern nextgroup=rustExternCrate,rustObsoleteExternMod skipwhite skipempty
+syn keyword rustKeyword fn nextgroup=rustFuncName skipwhite skipempty
syn keyword rustKeyword for in if impl let
-syn keyword rustKeyword loop once priv pub
-syn keyword rustKeyword return
-syn keyword rustKeyword unsafe while
-syn keyword rustKeyword use nextgroup=rustModPath skipwhite
+syn keyword rustKeyword loop once proc pub
+syn keyword rustKeyword return super
+syn keyword rustKeyword unsafe virtual while
+syn keyword rustKeyword use nextgroup=rustModPath skipwhite skipempty
" FIXME: Scoped impl's name is also fallen in this category
-syn keyword rustKeyword mod trait struct enum type nextgroup=rustIdentifier skipwhite
-syn keyword rustKeyword fn nextgroup=rustFuncName skipwhite
-syn keyword rustKeyword proc
+syn keyword rustKeyword mod trait struct enum type nextgroup=rustIdentifier skipwhite skipempty
syn keyword rustStorage mut ref static
syn keyword rustObsoleteStorage const
syn keyword rustInvalidBareKeyword crate
-syn keyword rustExternCrate crate contained nextgroup=rustIdentifier skipwhite
-syn keyword rustObsoleteExternMod mod contained nextgroup=rustIdentifier skipwhite
+syn keyword rustExternCrate crate contained nextgroup=rustIdentifier skipwhite skipempty
+syn keyword rustObsoleteExternMod mod contained nextgroup=rustIdentifier skipwhite skipempty
syn match rustIdentifier contains=rustIdentifierPrime "\%([^[:cntrl:][:space:][:punct:][:digit:]]\|_\)\%([^[:cntrl:][:punct:][:space:]]\|_\)*" display contained
syn match rustFuncName "\%([^[:cntrl:][:space:][:punct:][:digit:]]\|_\)\%([^[:cntrl:][:punct:][:space:]]\|_\)*" display contained
+syn region rustBoxPlacement matchgroup=rustBoxPlacementParens start="(" end=")" contains=TOP contained
+syn keyword rustBoxPlacementExpr GC containedin=rustBoxPlacement
+" Ideally we'd have syntax rules set up to match arbitrary expressions. Since
+" we don't, we'll just define temporary contained rules to handle balancing
+" delimiters.
+syn region rustBoxPlacementBalance start="(" end=")" containedin=rustBoxPlacement transparent
+syn region rustBoxPlacementBalance start="\[" end="\]" containedin=rustBoxPlacement transparent
+" {} are handled by rustFoldBraces
+
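For reference, the placement-`box` form these new rules highlight looks like this in the Rust of this era (a sketch; `GC` is the only placement expression the rules special-case):

```rust
// `box(GC) expr` allocates in the managed heap; bare `box expr`
// stays on the exchange heap.
let shared = box(GC) 5;
let owned = box 5;
```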
" Reserved (but not yet used) keywords {{{2
-syn keyword rustReservedKeyword alignof be do offsetof pure sizeof typeof yield
+syn keyword rustReservedKeyword alignof be do offsetof priv pure sizeof typeof unsized yield
" Built-in types {{{2
syn keyword rustType int uint float char bool u8 u16 u32 u64 f32
" to make it easy to update.
" Core operators {{{3
-syn keyword rustTrait Share Copy Send Sized
+syn keyword rustTrait Copy Send Sized Share
syn keyword rustTrait Add Sub Mul Div Rem Neg Not
syn keyword rustTrait BitAnd BitOr BitXor
-syn keyword rustTrait Drop
+syn keyword rustTrait Drop Deref DerefMut
syn keyword rustTrait Shl Shr Index
syn keyword rustEnum Option
syn keyword rustEnumVariant Some None
"syn keyword rustFunction drop
" Types and traits {{{3
-syn keyword rustTrait Any AnyOwnExt AnyRefExt AnyMutRefExt
syn keyword rustTrait Ascii AsciiCast OwnedAsciiCast AsciiStr IntoBytes
syn keyword rustTrait ToCStr
syn keyword rustTrait Char
syn keyword rustTrait Iterator DoubleEndedIterator RandomAccessIterator CloneableIterator
syn keyword rustTrait OrdIterator MutableDoubleEndedIterator ExactSize
syn keyword rustTrait Num NumCast CheckedAdd CheckedSub CheckedMul
-syn keyword rustTrait Signed Unsigned Round
-syn keyword rustTrait Primitive Int Float ToPrimitive FromPrimitive
+syn keyword rustTrait Signed Unsigned
+syn keyword rustTrait Primitive Int Float FloatMath ToPrimitive FromPrimitive
+"syn keyword rustTrait Expect
+syn keyword rustTrait Box
syn keyword rustTrait GenericPath Path PosixPath WindowsPath
syn keyword rustTrait RawPtr
syn keyword rustTrait Buffer Writer Reader Seek
-syn keyword rustTrait Str StrVector StrSlice OwnedStr IntoMaybeOwned StrBuf
+syn keyword rustTrait Str StrVector StrSlice OwnedStr IntoMaybeOwned
+syn keyword rustTrait StrAllocating
syn keyword rustTrait ToStr IntoStr
syn keyword rustTrait Tuple1 Tuple2 Tuple3 Tuple4
syn keyword rustTrait Tuple5 Tuple6 Tuple7 Tuple8
syn keyword rustTrait Tuple9 Tuple10 Tuple11 Tuple12
-syn keyword rustTrait ImmutableEqVector ImmutableTotalOrdVector ImmutableCloneableVector
-syn keyword rustTrait OwnedVector OwnedCloneableVector OwnedEqVector
-syn keyword rustTrait MutableVector MutableTotalOrdVector
-syn keyword rustTrait Vector VectorVector CloneableVector ImmutableVector
-
-"syn keyword rustFunction stream
-syn keyword rustTrait Sender Receiver
+syn keyword rustTrait CloneableVector ImmutableCloneableVector MutableCloneableVector
+syn keyword rustTrait ImmutableVector MutableVector
+syn keyword rustTrait ImmutableEqVector ImmutableTotalOrdVector MutableTotalOrdVector
+syn keyword rustTrait Vector VectorVector OwnedVector MutableVectorAllocating
+syn keyword rustTrait StrBuf
+syn keyword rustTrait Vec
+
+"syn keyword rustFunction sync_channel channel
+syn keyword rustTrait SyncSender Sender Receiver
"syn keyword rustFunction spawn
+"syn keyword rustConstant GC
+
syn keyword rustSelf self
syn keyword rustBoolean true false
hi def link rustInvalidBareKeyword Error
hi def link rustExternCrate rustKeyword
hi def link rustObsoleteExternMod Error
+hi def link rustBoxPlacementParens Delimiter
+hi def link rustBoxPlacementExpr rustKeyword
" Other Suggestions:
" hi rustAttribute ctermfg=cyan
// except according to those terms.
// FIXME: #13994: port to the sized deallocation API when available
-// FIXME: #13996: need a way to mark the `allocate` and `reallocate` return values as `noalias`
+// FIXME: #13996: mark the `allocate` and `reallocate` return values as `noalias` and `nonnull`
use core::intrinsics::{abort, cttz32};
use core::option::{None, Option};
/// The allocator for unique pointers.
#[cfg(not(test))]
#[lang="exchange_malloc"]
-#[inline(always)]
-pub unsafe fn exchange_malloc_(size: uint, align: uint) -> *mut u8 {
- exchange_malloc(size, align)
-}
-
-/// The allocator for unique pointers.
#[inline]
-pub unsafe fn exchange_malloc(size: uint, align: uint) -> *mut u8 {
+unsafe fn exchange_malloc(size: uint, align: uint) -> *mut u8 {
// The compiler never calls `exchange_free` on ~ZeroSizeType, so zero-size
// allocations can point to this `static`. It would be incorrect to use a null
// pointer, due to enums assuming types like unique pointers are never null.
}
}
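The enum assumption the comment refers to is the null-pointer optimization: an `Option` wrapping a pointer type known to be non-null represents `None` as the null value, so even a zero-size allocation must return a non-null address. A minimal sketch:

```rust
use std::mem::size_of;

// Box<u8> is assumed non-null, so Option<Box<u8>> is pointer-sized:
// the compiler reuses null to encode None.
assert_eq!(size_of::<Option<Box<u8>>>(), size_of::<Box<u8>>());
```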
-#[cfg(not(test))]
+#[cfg(not(test), stage0)]
#[lang="exchange_free"]
#[inline]
-// FIXME: #13994 (rustc should pass align and size here)
unsafe fn exchange_free(ptr: *mut u8) {
deallocate(ptr, 0, 8);
}
+#[cfg(not(test), not(stage0))]
+#[lang="exchange_free"]
+#[inline]
+unsafe fn exchange_free(ptr: *mut u8, size: uint, align: uint) {
+ deallocate(ptr, size, align);
+}
+
// FIXME: #7496
#[cfg(not(test))]
#[lang="closure_exchange_malloc"]
#[doc(hidden)]
#[deprecated]
#[cfg(not(test))]
-pub unsafe extern "C" fn rust_malloc(size: uint, align: uint) -> *mut u8 {
- exchange_malloc(size, align)
+pub unsafe extern "C" fn rust_allocate(size: uint, align: uint) -> *mut u8 {
+ allocate(size, align)
}
// hack for libcore
#[doc(hidden)]
#[deprecated]
#[cfg(not(test))]
-pub unsafe extern "C" fn rust_free(ptr: *mut u8, size: uint, align: uint) {
+pub unsafe extern "C" fn rust_deallocate(ptr: *mut u8, size: uint, align: uint) {
deallocate(ptr, size, align)
}
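These shims simply forward to the sized allocator API. The contract, as used by the arena code below, is that the same size and alignment are passed to both ends; a sketch:

```rust
use std::rt::heap::{allocate, deallocate};

unsafe {
    // allocate 16 bytes with 8-byte alignment
    let p = allocate(16, 8);
    // ... use the memory ...
    // sized deallocation: hand back the same size and alignment
    deallocate(p, 16, 8);
}
```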
use std::num;
use std::ptr::read;
use std::rc::Rc;
-use std::rt::heap::exchange_malloc;
+use std::rt::heap::allocate;
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
size = size.checked_add(&elems_size).unwrap();
let mut chunk = unsafe {
- let chunk = exchange_malloc(size,
- mem::min_align_of::<TypedArenaChunk<T>>());
+ let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>());
let mut chunk: Box<TypedArenaChunk<T>> = mem::transmute(chunk);
mem::overwrite(&mut chunk.next, next);
chunk
#[allow(ctypes)]
extern {
- fn rust_malloc(size: uint, align: uint) -> *u8;
- fn rust_free(ptr: *u8, size: uint, align: uint);
+ fn rust_allocate(size: uint, align: uint) -> *u8;
+ fn rust_deallocate(ptr: *u8, size: uint, align: uint);
}
unsafe fn alloc(cap: uint) -> *mut Vec<()> {
let cap = cap.checked_add(&mem::size_of::<Vec<()>>()).unwrap();
// this should use the real alignment, but the new representation will take care of that
- let ret = rust_malloc(cap, 8) as *mut Vec<()>;
+ let ret = rust_allocate(cap, 8) as *mut Vec<()>;
if ret.is_null() {
intrinsics::abort();
}
&(*ptr).data,
len);
// FIXME: #13994: port to the sized deallocation API when available
- rust_free(ptr as *u8, 0, 8);
+ rust_deallocate(ptr as *u8, 0, 8);
mem::forget(ret);
ret = mem::transmute(ptr2);
ptr = ptr2;
for j in range(0, *i as int) {
ptr::read(&*p.offset(j));
}
- rust_free(ret as *u8, 0, 8);
+ rust_deallocate(ret as *u8, 0, 8);
});
mem::transmute(ret)
}
//! This implementation is also used as the fallback implementation of an event
//! loop if no other one is provided (and M:N scheduling is desired).
+use alloc::arc::Arc;
+use std::sync::atomics;
use std::mem;
use std::rt::rtio::{EventLoop, IoFactory, RemoteCallback};
use std::rt::rtio::{PausableIdleCallback, Callback};
struct BasicLoop {
work: Vec<proc():Send>, // pending work
- idle: Option<*mut BasicPausable>, // only one is allowed
remotes: Vec<(uint, Box<Callback:Send>)>,
next_remote: uint,
messages: Exclusive<Vec<Message>>,
+ idle: Option<Box<Callback:Send>>,
+ idle_active: Option<Arc<atomics::AtomicBool>>,
}
enum Message { RunRemote(uint), RemoveRemote(uint) }
BasicLoop {
work: vec![],
idle: None,
+ idle_active: None,
next_remote: 0,
remotes: vec![],
messages: Exclusive::new(vec![]),
/// Run the idle callback if one is registered
fn idle(&mut self) {
- unsafe {
- match self.idle {
- Some(idle) => {
- if (*idle).active {
- (*idle).work.call();
- }
+ match self.idle {
+ Some(ref mut idle) => {
+ if self.idle_active.get_ref().load(atomics::SeqCst) {
+ idle.call();
}
- None => {}
}
+ None => {}
}
}
fn has_idle(&self) -> bool {
- unsafe { self.idle.is_some() && (**self.idle.get_ref()).active }
+ self.idle.is_some() && self.idle_active.get_ref().load(atomics::SeqCst)
}
}
// FIXME: Seems like a really weird requirement to have an event loop provide.
fn pausable_idle_callback(&mut self, cb: Box<Callback:Send>)
-> Box<PausableIdleCallback:Send> {
- let callback = box BasicPausable::new(self, cb);
rtassert!(self.idle.is_none());
- unsafe {
- let cb_ptr: &*mut BasicPausable = mem::transmute(&callback);
- self.idle = Some(*cb_ptr);
- }
- callback as Box<PausableIdleCallback:Send>
+ self.idle = Some(cb);
+ let a = Arc::new(atomics::AtomicBool::new(true));
+ self.idle_active = Some(a.clone());
+ box BasicPausable { active: a } as Box<PausableIdleCallback:Send>
}
fn remote_callback(&mut self, f: Box<Callback:Send>)
}
struct BasicPausable {
- eloop: *mut BasicLoop,
- work: Box<Callback:Send>,
- active: bool,
-}
-
-impl BasicPausable {
- fn new(eloop: &mut BasicLoop, cb: Box<Callback:Send>) -> BasicPausable {
- BasicPausable {
- active: false,
- work: cb,
- eloop: eloop,
- }
- }
+ active: Arc<atomics::AtomicBool>,
}
impl PausableIdleCallback for BasicPausable {
fn pause(&mut self) {
- self.active = false;
+ self.active.store(false, atomics::SeqCst);
}
fn resume(&mut self) {
- self.active = true;
+ self.active.store(true, atomics::SeqCst);
}
}
impl Drop for BasicPausable {
fn drop(&mut self) {
- unsafe {
- (*self.eloop).idle = None;
- }
+ self.active.store(false, atomics::SeqCst);
}
}
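The refactor swaps a raw back-pointer into the event loop for a shared atomic flag: `pause`/`resume` become plain stores, and the loop only ever reads. A minimal sketch of the pattern in isolation:

```rust
use alloc::arc::Arc;
use std::sync::atomics;

let active = Arc::new(atomics::AtomicBool::new(true));
let handle = active.clone();

handle.store(false, atomics::SeqCst);   // what pause() now does
if active.load(atomics::SeqCst) {       // the loop's has_idle()/idle() check
    /* run the idle callback */
}
```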
#[cfg(test)] extern crate rustuv;
extern crate rand;
extern crate libc;
+extern crate alloc;
+use alloc::arc::Arc;
use std::mem::replace;
use std::os;
use std::rt::rtio;
use std::sync::atomics::{SeqCst, AtomicUint, INIT_ATOMIC_UINT};
use std::sync::deque;
use std::task::TaskOpts;
-use std::sync::arc::UnsafeArc;
use sched::{Shutdown, Scheduler, SchedHandle, TaskFromFriend, NewNeighbor};
use sleeper_list::SleeperList;
/// sending on a channel once the entire pool has been drained of all tasks.
#[deriving(Clone)]
struct TaskState {
- cnt: UnsafeArc<AtomicUint>,
+ cnt: Arc<AtomicUint>,
done: Sender<()>,
}
pool.sleepers.clone(),
pool.task_state.clone());
pool.handles.push(sched.make_handle());
- let sched = sched;
pool.threads.push(Thread::start(proc() { sched.bootstrap(); }));
}
self.task_state.clone());
let ret = sched.make_handle();
self.handles.push(sched.make_handle());
- let sched = sched;
self.threads.push(Thread::start(proc() { sched.bootstrap() }));
return ret;
fn new() -> (Receiver<()>, TaskState) {
let (tx, rx) = channel();
(rx, TaskState {
- cnt: UnsafeArc::new(AtomicUint::new(0)),
+ cnt: Arc::new(AtomicUint::new(0)),
done: tx,
})
}
fn increment(&mut self) {
- unsafe { (*self.cnt.get()).fetch_add(1, SeqCst); }
+ self.cnt.fetch_add(1, SeqCst);
}
fn active(&self) -> bool {
- unsafe { (*self.cnt.get()).load(SeqCst) != 0 }
+ self.cnt.load(SeqCst) != 0
}
fn decrement(&mut self) {
- let prev = unsafe { (*self.cnt.get()).fetch_sub(1, SeqCst) };
+ let prev = self.cnt.fetch_sub(1, SeqCst);
if prev == 1 {
self.done.send(());
}
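Together these methods implement drain notification: the shared counter tracks live tasks, and the channel fires when the last one exits. A usage sketch built on the `TaskState::new` constructor above:

```rust
let (rx, mut state) = TaskState::new();
state.increment();          // a task enters the pool
assert!(state.active());
state.decrement();          // last task leaves: done.send(()) fires
rx.recv();                  // blocks until the pool is fully drained
```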
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use alloc::arc::Arc;
use mpsc = std::sync::mpsc_queue;
-use std::sync::arc::UnsafeArc;
+use std::kinds::marker;
pub enum PopResult<T> {
Inconsistent,
}
pub fn queue<T: Send>() -> (Consumer<T>, Producer<T>) {
- let (a, b) = UnsafeArc::new2(mpsc::Queue::new());
- (Consumer { inner: a }, Producer { inner: b })
+ let a = Arc::new(mpsc::Queue::new());
+ (Consumer { inner: a.clone(), noshare: marker::NoShare },
+ Producer { inner: a, noshare: marker::NoShare })
}
pub struct Producer<T> {
- inner: UnsafeArc<mpsc::Queue<T>>,
+ inner: Arc<mpsc::Queue<T>>,
+ noshare: marker::NoShare,
}
pub struct Consumer<T> {
- inner: UnsafeArc<mpsc::Queue<T>>,
+ inner: Arc<mpsc::Queue<T>>,
+ noshare: marker::NoShare,
}
impl<T: Send> Consumer<T> {
- pub fn pop(&mut self) -> PopResult<T> {
- match unsafe { (*self.inner.get()).pop() } {
+ pub fn pop(&self) -> PopResult<T> {
+ match self.inner.pop() {
mpsc::Inconsistent => Inconsistent,
mpsc::Empty => Empty,
mpsc::Data(t) => Data(t),
}
}
- pub fn casual_pop(&mut self) -> Option<T> {
- match unsafe { (*self.inner.get()).pop() } {
+ pub fn casual_pop(&self) -> Option<T> {
+ match self.inner.pop() {
mpsc::Inconsistent => None,
mpsc::Empty => None,
mpsc::Data(t) => Some(t),
}
impl<T: Send> Producer<T> {
- pub fn push(&mut self, t: T) {
- unsafe { (*self.inner.get()).push(t); }
+ pub fn push(&self, t: T) {
+ self.inner.push(t);
}
}
impl<T: Send> Clone for Producer<T> {
fn clone(&self) -> Producer<T> {
- Producer { inner: self.inner.clone() }
+ Producer { inner: self.inner.clone(), noshare: marker::NoShare }
}
}
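A single-task usage sketch of the pair returned by `queue()`; in practice the `Producer` is cloned across senders while the lone `Consumer` drains:

```rust
let (cons, prod) = queue::<int>();
prod.push(1);
prod.push(2);
match cons.pop() {
    Data(n) => assert_eq!(n, 1),
    Empty | Inconsistent => unreachable!(),
}
```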
Thread::start(proc() {
let sleepers = SleeperList::new();
- let mut pool = BufferPool::new();
+ let pool = BufferPool::new();
let (normal_worker, normal_stealer) = pool.deque();
let (special_worker, special_stealer) = pool.deque();
let queues = vec![normal_stealer, special_stealer];
//! Blocking posix-based file I/O
+use alloc::arc::Arc;
use libc::{c_int, c_void};
use libc;
use std::c_str::CString;
use std::io;
use std::mem;
use std::rt::rtio;
-use std::sync::arc::UnsafeArc;
use io::{IoResult, retry, keep_going};
}
pub struct FileDesc {
- inner: UnsafeArc<Inner>
+ inner: Arc<Inner>
}
impl FileDesc {
/// Note that all I/O operations done on this object will be *blocking*, but
/// they do not require the runtime to be active.
pub fn new(fd: fd_t, close_on_drop: bool) -> FileDesc {
- FileDesc { inner: UnsafeArc::new(Inner {
+ FileDesc { inner: Arc::new(Inner {
fd: fd,
close_on_drop: close_on_drop
}) }
}
}
- pub fn fd(&self) -> fd_t {
- // This unsafety is fine because we're just reading off the file
- // descriptor, no one is modifying this.
- unsafe { (*self.inner.get()).fd }
- }
+ pub fn fd(&self) -> fd_t { self.inner.fd }
}
impl io::Reader for FileDesc {
//! Blocking win32-based file I/O
+use alloc::arc::Arc;
+use libc::{c_int, c_void};
+use libc;
use std::c_str::CString;
use std::io::IoError;
use std::io;
-use libc::{c_int, c_void};
-use libc;
use std::mem;
use std::os::win32::{as_utf16_p, fill_utf16_buf_and_decode};
use std::ptr;
use std::rt::rtio;
use std::str;
-use std::sync::arc::UnsafeArc;
use std::vec;
use io::IoResult;
}
pub struct FileDesc {
- inner: UnsafeArc<Inner>
+ inner: Arc<Inner>
}
impl FileDesc {
/// Note that all I/O operations done on this object will be *blocking*, but
/// they do not require the runtime to be active.
pub fn new(fd: fd_t, close_on_drop: bool) -> FileDesc {
- FileDesc { inner: UnsafeArc::new(Inner {
+ FileDesc { inner: Arc::new(Inner {
fd: fd,
close_on_drop: close_on_drop
}) }
Ok(())
}
- pub fn fd(&self) -> fd_t {
- // This unsafety is fine because we're just reading off the file
- // descriptor, no one is modifying this.
- unsafe { (*self.inner.get()).fd }
- }
+ pub fn fd(&self) -> fd_t { self.inner.fd }
pub fn handle(&self) -> libc::HANDLE {
unsafe { libc::get_osfhandle(self.fd()) as libc::HANDLE }
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use alloc::arc::Arc;
use libc;
use std::io::net::ip;
use std::io;
use std::mem;
use std::rt::rtio;
-use std::sync::arc::UnsafeArc;
use std::unstable::mutex;
use super::{IoResult, retry, keep_going};
////////////////////////////////////////////////////////////////////////////////
pub struct TcpStream {
- inner: UnsafeArc<Inner>,
+ inner: Arc<Inner>,
read_deadline: u64,
write_deadline: u64,
}
fn new(inner: Inner) -> TcpStream {
TcpStream {
- inner: UnsafeArc::new(inner),
+ inner: Arc::new(inner),
read_deadline: 0,
write_deadline: 0,
}
}
- pub fn fd(&self) -> sock_t {
- // This unsafety is fine because it's just a read-only arc
- unsafe { (*self.inner.get()).fd }
- }
+ pub fn fd(&self) -> sock_t { self.inner.fd }
fn set_nodelay(&mut self, nodelay: bool) -> IoResult<()> {
setsockopt(self.fd(), libc::IPPROTO_TCP, libc::TCP_NODELAY,
fn lock_nonblocking<'a>(&'a self) -> Guard<'a> {
let ret = Guard {
fd: self.fd(),
- guard: unsafe { (*self.inner.get()).lock.lock() },
+ guard: unsafe { self.inner.lock.lock() },
};
assert!(util::set_nonblocking(self.fd(), true).is_ok());
ret
////////////////////////////////////////////////////////////////////////////////
pub struct UdpSocket {
- inner: UnsafeArc<Inner>,
+ inner: Arc<Inner>,
read_deadline: u64,
write_deadline: u64,
}
pub fn bind(addr: ip::SocketAddr) -> IoResult<UdpSocket> {
let fd = try!(socket(addr, libc::SOCK_DGRAM));
let ret = UdpSocket {
- inner: UnsafeArc::new(Inner::new(fd)),
+ inner: Arc::new(Inner::new(fd)),
read_deadline: 0,
write_deadline: 0,
};
}
}
- pub fn fd(&self) -> sock_t {
- // unsafety is fine because it's just a read-only arc
- unsafe { (*self.inner.get()).fd }
- }
+ pub fn fd(&self) -> sock_t { self.inner.fd }
pub fn set_broadcast(&mut self, on: bool) -> IoResult<()> {
setsockopt(self.fd(), libc::SOL_SOCKET, libc::SO_BROADCAST,
fn lock_nonblocking<'a>(&'a self) -> Guard<'a> {
let ret = Guard {
fd: self.fd(),
- guard: unsafe { (*self.inner.get()).lock.lock() },
+ guard: unsafe { self.inner.lock.lock() },
};
assert!(util::set_nonblocking(self.fd(), true).is_ok());
ret
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use alloc::arc::Arc;
use libc;
use std::c_str::CString;
use std::intrinsics;
use std::io;
use std::mem;
use std::rt::rtio;
-use std::sync::arc::UnsafeArc;
use std::unstable::mutex;
use super::{IoResult, retry};
////////////////////////////////////////////////////////////////////////////////
pub struct UnixStream {
- inner: UnsafeArc<Inner>,
+ inner: Arc<Inner>,
read_deadline: u64,
write_deadline: u64,
}
pub fn connect(addr: &CString,
timeout: Option<u64>) -> IoResult<UnixStream> {
connect(addr, libc::SOCK_STREAM, timeout).map(|inner| {
- UnixStream::new(UnsafeArc::new(inner))
+ UnixStream::new(Arc::new(inner))
})
}
- fn new(inner: UnsafeArc<Inner>) -> UnixStream {
+ fn new(inner: Arc<Inner>) -> UnixStream {
UnixStream {
inner: inner,
read_deadline: 0,
}
}
- fn fd(&self) -> fd_t { unsafe { (*self.inner.get()).fd } }
+ fn fd(&self) -> fd_t { self.inner.fd }
#[cfg(target_os = "linux")]
fn lock_nonblocking(&self) {}
fn lock_nonblocking<'a>(&'a self) -> net::Guard<'a> {
let ret = net::Guard {
fd: self.fd(),
- guard: unsafe { (*self.inner.get()).lock.lock() },
+ guard: unsafe { self.inner.lock.lock() },
};
assert!(util::set_nonblocking(self.fd(), true).is_ok());
ret
&mut size as *mut libc::socklen_t) as libc::c_int
}) {
-1 => Err(super::last_error()),
- fd => Ok(UnixStream::new(UnsafeArc::new(Inner::new(fd))))
+ fd => Ok(UnixStream::new(Arc::new(Inner::new(fd))))
}
}
}
//! the test suite passing (the suite is in libstd), and that's good enough for
//! me!
+use alloc::arc::Arc;
use libc;
use std::c_str::CString;
use std::io;
use std::os;
use std::ptr;
use std::rt::rtio;
-use std::sync::arc::UnsafeArc;
use std::sync::atomics;
use std::unstable::mutex;
////////////////////////////////////////////////////////////////////////////////
pub struct UnixStream {
- inner: UnsafeArc<Inner>,
+ inner: Arc<Inner>,
write: Option<Event>,
read: Option<Event>,
read_deadline: u64,
Err(super::last_error())
} else {
Ok(UnixStream {
- inner: UnsafeArc::new(inner),
+ inner: Arc::new(inner),
read: None,
write: None,
read_deadline: 0,
})
}
- fn handle(&self) -> libc::HANDLE { unsafe { (*self.inner.get()).handle } }
+ fn handle(&self) -> libc::HANDLE { self.inner.handle }
fn read_closed(&self) -> bool {
- unsafe { (*self.inner.get()).read_closed.load(atomics::SeqCst) }
+ self.inner.read_closed.load(atomics::SeqCst)
}
fn write_closed(&self) -> bool {
- unsafe { (*self.inner.get()).write_closed.load(atomics::SeqCst) }
+ self.inner.write_closed.load(atomics::SeqCst)
}
fn cancel_io(&self) -> IoResult<()> {
// acquire the lock.
//
// See comments in close_read() about why this lock is necessary.
- let guard = unsafe { (*self.inner.get()).lock.lock() };
+ let guard = unsafe { self.inner.lock.lock() };
if self.read_closed() {
return Err(io::standard_error(io::EndOfFile))
}
// going after we woke up.
//
// See comments in close_read() about why this lock is necessary.
- let guard = unsafe { (*self.inner.get()).lock.lock() };
+ let guard = unsafe { self.inner.lock.lock() };
if self.write_closed() {
return Err(io::standard_error(io::BrokenPipe))
}
// close_read() between steps 1 and 2. By atomically executing steps 1
// and 2 with a lock with respect to close_read(), we're guaranteed that
// no thread will erroneously sit in a read forever.
- let _guard = unsafe { (*self.inner.get()).lock.lock() };
- unsafe { (*self.inner.get()).read_closed.store(true, atomics::SeqCst) }
+ let _guard = unsafe { self.inner.lock.lock() };
+ self.inner.read_closed.store(true, atomics::SeqCst);
self.cancel_io()
}
fn close_write(&mut self) -> IoResult<()> {
// see comments in close_read() for why this lock is necessary
- let _guard = unsafe { (*self.inner.get()).lock.lock() };
- unsafe { (*self.inner.get()).write_closed.store(true, atomics::SeqCst) }
+ let _guard = unsafe { self.inner.lock.lock() };
+ self.inner.write_closed.store(true, atomics::SeqCst);
self.cancel_io()
}
// Transfer ownership of our handle into this stream
Ok(UnixStream {
- inner: UnsafeArc::new(Inner::new(handle)),
+ inner: Arc::new(Inner::new(handle)),
read: None,
write: None,
read_deadline: 0,
// answer is that you don't need them)
#![feature(macro_rules)]
+extern crate alloc;
extern crate libc;
use std::os;
// FIXME #1284: handle complex NaN & infinity etc. This
// probably doesn't map to C's _Complex correctly.
-// FIXME #5734:: Need generic sin/cos for .to/from_polar().
-// FIXME #5735: Need generic sqrt to implement .norm().
-
-
/// A complex number in Cartesian form.
#[deriving(Eq,Clone)]
pub struct Complex<T> {
use syntax::parse::token;
use syntax::util::small_vector::SmallVector;
+use std::mem;
+
pub static VERSION: &'static str = "0.11.0-pre";
pub fn maybe_inject_crates_ref(sess: &Session, krate: ast::Crate)
}
impl<'a> fold::Folder for StandardLibraryInjector<'a> {
- fn fold_crate(&mut self, krate: ast::Crate) -> ast::Crate {
+ fn fold_crate(&mut self, mut krate: ast::Crate) -> ast::Crate {
let mut vis = vec!(ast::ViewItem {
node: ast::ViewItemExternCrate(token::str_to_ident("std"),
with_version("std"),
ast::DUMMY_NODE_ID),
attrs: vec!(
- attr::mk_attr(attr::mk_list_item(
+ attr::mk_attr_outer(attr::mk_list_item(
InternedString::new("phase"),
vec!(
attr::mk_word_item(InternedString::new("syntax")),
}
// `extern crate` items must precede `use` items
- vis.push_all_move(krate.module.view_items.clone());
- let new_module = ast::Mod {
- view_items: vis,
- ..krate.module.clone()
- };
+ mem::swap(&mut vis, &mut krate.module.view_items);
+ krate.module.view_items.push_all_move(vis);
- ast::Crate {
- module: new_module,
- ..krate
- }
+ // Don't add #![no_std] here; it would block the prelude injection later.
+ // Add it during the prelude injection instead.
+
+ // Add #![feature(phase)] here, because we use #[phase] on extern crate std.
+ let feat_phase_attr = attr::mk_attr_inner(attr::mk_list_item(
+ InternedString::new("feature"),
+ vec![attr::mk_word_item(InternedString::new("phase"))],
+ ));
+ krate.attrs.push(feat_phase_attr);
+
+ krate
}
}
impl<'a> fold::Folder for PreludeInjector<'a> {
- fn fold_crate(&mut self, krate: ast::Crate) -> ast::Crate {
+ fn fold_crate(&mut self, mut krate: ast::Crate) -> ast::Crate {
+ // Add #![no_std] here, so we don't re-inject when compiling pretty-printed source.
+ // This must happen here and not in StandardLibraryInjector because this
+ // fold happens second.
+
+ let no_std_attr = attr::mk_attr_inner(attr::mk_word_item(InternedString::new("no_std")));
+ krate.attrs.push(no_std_attr);
+
if !no_prelude(krate.attrs.as_slice()) {
// only add `use std::prelude::*;` if there wasn't a
// `#![no_implicit_prelude]` at the crate level.
- let mut attrs = krate.attrs.clone();
-
// fold_mod() will insert glob path.
- let globs_attr = attr::mk_attr(attr::mk_list_item(
+ let globs_attr = attr::mk_attr_inner(attr::mk_list_item(
InternedString::new("feature"),
vec!(
attr::mk_word_item(InternedString::new("globs")),
)));
- attrs.push(globs_attr);
+ krate.attrs.push(globs_attr);
- ast::Crate {
- module: self.fold_mod(&krate.module),
- attrs: attrs,
- ..krate
- }
- } else {
- krate
+ krate.module = self.fold_mod(&krate.module);
}
+ krate
}
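After both folders run, an ordinary crate effectively begins with the injected attributes and items sketched below (the `link` word in `#[phase]` is assumed; the hunk elides the full list):

```rust
#![feature(phase)]      // StandardLibraryInjector: needed for #[phase] below
#![no_std]              // PreludeInjector: stops re-injection when compiling
                        // pretty-printed source
#![feature(globs)]      // PreludeInjector: for the glob import

#[phase(syntax, link)]  // `link` assumed; only `syntax` is visible above
extern crate std;

use std::prelude::*;    // inserted by fold_mod()
```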
fn fold_item(&mut self, item: @ast::Item) -> SmallVector<@ast::Item> {
// This attribute tells resolve to let us call unexported functions
let resolve_unexported_str = InternedString::new("!resolve_unexported");
let resolve_unexported_attr =
- attr::mk_attr(attr::mk_word_item(resolve_unexported_str));
+ attr::mk_attr_inner(attr::mk_word_item(resolve_unexported_str));
let item = ast::Item {
ident: token::str_to_ident("__test"),
fn synthesize_crateid_attr(ecx: &EncodeContext) -> Attribute {
assert!(!ecx.link_meta.crateid.name.is_empty());
- attr::mk_attr(
+ attr::mk_attr_inner(
attr::mk_name_value_item_str(
InternedString::new("crate_id"),
token::intern_and_get_ident(ecx.link_meta.crateid.to_str())))
_ => *r.get(0)
}
}
+ None if v.len() == 0 => return not_useful,
None => v[0]
};
let left_ty = if real_pat.id == 0 { ty::mk_nil() }
let ms = m.iter().filter_map(|r| {
specialize(cx, r.as_slice(), &ctor, arity, lty)
}).collect::<matrix>();
- let could_be_useful = is_useful(
- cx, &ms, specialize(cx, v, &ctor, arity, lty).unwrap().as_slice());
+ let could_be_useful = match specialize(cx, v, &ctor, arity, lty) {
+ Some(v) => is_useful(cx, &ms, v.as_slice()),
+ None => return not_useful,
+ };
match could_be_useful {
useful_ => useful(lty, ctor),
u => u,
return;
}
- let mut imports = module.imports.borrow_mut();
+ let imports = module.imports.borrow();
let import_count = imports.len();
while module.resolved_import_count.get() < import_count {
let import_index = module.resolved_import_count.get();
let f = decl_rust_fn(ccx, false, inputs, output, name);
csearch::get_item_attrs(&ccx.sess().cstore, did, |meta_items| {
- set_llvm_fn_attrs(meta_items.iter().map(|&x| attr::mk_attr(x))
+ set_llvm_fn_attrs(meta_items.iter().map(|&x| attr::mk_attr_outer(x))
.collect::<Vec<_>>().as_slice(), f)
});
fn schedule_free_value(&self,
cleanup_scope: ScopeId,
val: ValueRef,
- heap: Heap) {
+ heap: Heap,
+ content_ty: ty::t) {
/*!
* Schedules a call to `free(val)`. Note that this is a shallow
* operation.
*/
- let drop = box FreeValue { ptr: val, heap: heap };
+ let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };
debug!("schedule_free_value({:?}, val={}, heap={:?})",
cleanup_scope,
pub struct FreeValue {
ptr: ValueRef,
heap: Heap,
+ content_ty: ty::t
}
impl Cleanup for FreeValue {
glue::trans_free(bcx, self.ptr)
}
HeapExchange => {
- glue::trans_exchange_free(bcx, self.ptr)
+ glue::trans_exchange_free_ty(bcx, self.ptr, self.content_ty)
}
}
}
fn schedule_free_value(&self,
cleanup_scope: ScopeId,
val: ValueRef,
- heap: Heap);
+ heap: Heap,
+ content_ty: ty::t);
fn schedule_clean(&self,
cleanup_scope: ScopeId,
cleanup: Box<Cleanup>);
} else {
let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
- val, cleanup::HeapExchange);
+ val, cleanup::HeapExchange, contents_ty);
let bcx = trans_into(bcx, contents, SaveIn(val));
fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
bcx
let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
- bx, cleanup::HeapManaged);
+ bx, cleanup::HeapManaged, contents_ty);
let bcx = trans_into(bcx, contents, SaveIn(body));
fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
immediate_rvalue_bcx(bcx, bx, box_ty).to_expr_datumblock()
let scope = cleanup::temporary_scope(bcx.tcx(), expr.id);
let ptr = Load(bcx, datum.val);
if !type_is_zero_size(bcx.ccx(), content_ty) {
- bcx.fcx.schedule_free_value(scope, ptr, cleanup::HeapExchange);
+ bcx.fcx.schedule_free_value(scope, ptr, cleanup::HeapExchange, content_ty);
}
}
RvalueExpr(Rvalue { mode: ByValue }) => {
let scope = cleanup::temporary_scope(bcx.tcx(), expr.id);
if !type_is_zero_size(bcx.ccx(), content_ty) {
- bcx.fcx.schedule_free_value(scope, datum.val, cleanup::HeapExchange);
+ bcx.fcx.schedule_free_value(scope, datum.val, cleanup::HeapExchange,
+ content_ty);
}
}
LvalueExpr => { }
Some(expr::Ignore)).bcx
}
-pub fn trans_exchange_free<'a>(cx: &'a Block<'a>, v: ValueRef)
- -> &'a Block<'a> {
+fn trans_exchange_free<'a>(cx: &'a Block<'a>, v: ValueRef, size: u64,
+ align: u64) -> &'a Block<'a> {
let _icx = push_ctxt("trans_exchange_free");
+ let ccx = cx.ccx();
callee::trans_lang_call(cx,
langcall(cx, None, "", ExchangeFreeFnLangItem),
- [PointerCast(cx, v, Type::i8p(cx.ccx()))],
+ [PointerCast(cx, v, Type::i8p(ccx)), C_uint(ccx, size as uint), C_uint(ccx, align as uint)],
Some(expr::Ignore)).bcx
}
+pub fn trans_exchange_free_ty<'a>(bcx: &'a Block<'a>, ptr: ValueRef,
+ content_ty: ty::t) -> &'a Block<'a> {
+ let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
+ let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
+
+ // `Box<ZeroSizeType>` does not allocate.
+ if content_size != 0 {
+ let content_align = llalign_of_min(bcx.ccx(), sizing_type);
+ trans_exchange_free(bcx, ptr, content_size, content_align)
+ } else {
+ bcx
+ }
+}
+
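The zero-size check mirrors the allocation side: `Box<ZeroSizeType>` never touched the allocator, so its drop glue must not call `exchange_free`. The same predicate in plain Rust, as a sketch:

```rust
use std::mem::size_of;

// analogous to the content_size != 0 test above
fn needs_free<T>() -> bool { size_of::<T>() != 0 }

assert!(!needs_free::<()>());
assert!(needs_free::<int>());
```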
pub fn take_ty<'a>(bcx: &'a Block<'a>, v: ValueRef, t: ty::t)
-> &'a Block<'a> {
// NB: v is an *alias* of type t here, not a direct value.
ty::ty_vec(_, None) | ty::ty_str => t,
_ => {
let llty = sizing_type_of(ccx, typ);
- // Unique boxes do not allocate for zero-size types. The standard
- // library may assume that `free` is never called on the pointer
- // returned for `Box<ZeroSizeType>`.
+ // `Box<ZeroSizeType>` does not allocate.
if llsize_of_alloc(ccx, llty) == 0 {
ty::mk_i8()
} else {
ty::mk_uniq(tcx, ty::mk_i8())
}
- }
- }
}
+ }
+ }
_ => t
}
}
ty::ty_vec(mt, None) => {
with_cond(bcx, not_null, |bcx| {
let bcx = tvec::make_drop_glue_unboxed(bcx, llbox, mt.ty);
- trans_exchange_free(bcx, llbox)
+ // FIXME: #13994: the old `Box<[T]>` will not support sized deallocation
+ trans_exchange_free(bcx, llbox, 0, 8)
})
}
ty::ty_str => {
with_cond(bcx, not_null, |bcx| {
let unit_ty = ty::sequence_element_type(bcx.tcx(), t);
let bcx = tvec::make_drop_glue_unboxed(bcx, llbox, unit_ty);
- trans_exchange_free(bcx, llbox)
+ // FIXME: #13994: the old `Box<str>` will not support sized deallocation
+ trans_exchange_free(bcx, llbox, 0, 8)
})
}
_ => {
with_cond(bcx, not_null, |bcx| {
let bcx = drop_ty(bcx, llbox, content_ty);
- trans_exchange_free(bcx, llbox)
+ trans_exchange_free_ty(bcx, llbox, content_ty)
})
}
}
Call(bcx, dtor, [PointerCast(bcx, cdata, Type::i8p(bcx.ccx()))], []);
// Free the environment itself
- trans_exchange_free(bcx, env)
+ // FIXME: #13994: pass align and size here
+ trans_exchange_free(bcx, env, 0, 8)
})
}
_ => {
// Create a temporary scope lest execution should fail while
// constructing the vector.
let temp_scope = fcx.push_custom_cleanup_scope();
+
+ // FIXME: #13994: the old `Box<[T]>` will not support sized deallocation; this is a placeholder
+ let content_ty = vt.unit_ty;
fcx.schedule_free_value(cleanup::CustomScope(temp_scope),
- val, cleanup::HeapExchange);
+ val, cleanup::HeapExchange, content_ty);
let dataptr = get_dataptr(bcx, val);
}
}))
}
- this.tcx().sess.span_bug(path.span,
+ this.tcx().sess.span_err(path.span,
"not enough type parameters \
- supplied to `Box<T>`")
+ supplied to `Box<T>`");
+ Some(ty::mk_err())
}
_ => None
}
param_ty: param_ty) {
debug!("push_inherent_candidates_from_param(param_ty={:?})",
param_ty);
- self.push_inherent_candidates_from_bounds(
- rcvr_ty,
- self.fcx
- .inh
- .param_env
- .type_param_bounds
- .get(param_ty.idx)
- .trait_bounds
- .as_slice(),
- restrict_to,
- param_numbered(param_ty.idx));
+ let i = param_ty.idx;
+ match self.fcx.inh.param_env.type_param_bounds.as_slice().get(i) {
+ Some(b) => self.push_inherent_candidates_from_bounds(
+ rcvr_ty, b.trait_bounds.as_slice(), restrict_to,
+ param_numbered(param_ty.idx)),
+ None => {}
+ }
}
if !checked {
tcx.sess.span_err(expr.span,
"only the managed heap and exchange heap are \
- currently supported")
+ currently supported");
+ fcx.write_ty(id, ty::mk_err());
}
}
/// When rendering traits, it's often useful to be able to list all
/// implementors of the trait, and this mapping is exactly, that: a mapping
/// of trait ids to the list of known implementors of the trait
- pub implementors: HashMap<ast::NodeId, Vec<Implementor> >,
+ pub implementors: HashMap<ast::NodeId, Vec<Implementor>>,
+
+ /// Implementations of external traits, keyed by the external trait def id.
+ pub foreign_implementors: HashMap<ast::DefId, Vec<Implementor>>,
/// Cache of where external crate documentation can be found.
pub extern_locations: HashMap<ast::CrateNum, ExternalLocation>,
paths: paths,
traits: HashMap::new(),
implementors: HashMap::new(),
+ foreign_implementors: HashMap::new(),
stack: Vec::new(),
parent_stack: Vec::new(),
search_index: Vec::new(),
try!(write(cx.dst.join("Heuristica-Bold.woff"),
include_bin!("static/Heuristica-Bold.woff")));
- // Update the search index
- let dst = cx.dst.join("search-index.js");
- let mut all_indexes = Vec::new();
- all_indexes.push(index);
- if dst.exists() {
- for line in BufferedReader::new(File::open(&dst)).lines() {
- let line = try!(line);
- if !line.starts_with("searchIndex") { continue }
- if line.starts_with(format!("searchIndex['{}']", krate.name)) {
- continue
+ fn collect(path: &Path, krate: &str,
+ key: &str) -> io::IoResult<Vec<StrBuf>> {
+ let mut ret = Vec::new();
+ if path.exists() {
+ for line in BufferedReader::new(File::open(path)).lines() {
+ let line = try!(line);
+ if !line.starts_with(key) { continue }
+ if line.starts_with(format!("{}['{}']", key, krate)) {
+ continue
+ }
+ ret.push(line.to_strbuf());
}
- all_indexes.push(line);
}
+ return Ok(ret);
}
+
+ // Update the search index
+ let dst = cx.dst.join("search-index.js");
+ let all_indexes = try!(collect(&dst, krate.name.as_slice(),
+ "searchIndex"));
let mut w = try!(File::create(&dst));
try!(writeln!(&mut w, r"var searchIndex = \{\};"));
+ try!(writeln!(&mut w, "{}", index));
for index in all_indexes.iter() {
try!(writeln!(&mut w, "{}", *index));
}
try!(writeln!(&mut w, "initSearch(searchIndex);"));
+
+ // Update the list of all implementors for traits
+ let dst = cx.dst.join("implementors");
+ try!(mkdir(&dst));
+ for (&did, imps) in cache.foreign_implementors.iter() {
+ let &(ref remote_path, remote_item_type) = cache.paths.get(&did);
+
+ let mut mydst = dst.clone();
+ for part in remote_path.slice_to(remote_path.len() - 1).iter() {
+ mydst.push(part.as_slice());
+ try!(mkdir(&mydst));
+ }
+ mydst.push(format!("{}.{}.js",
+ remote_item_type.to_static_str(),
+ *remote_path.get(remote_path.len() - 1)));
+ let all_implementors = try!(collect(&mydst, krate.name.as_slice(),
+ "implementors"));
+
+ try!(mkdir(&mydst.dir_path()));
+ let mut f = BufferedWriter::new(try!(File::create(&mydst)));
+ try!(writeln!(&mut f, r"(function() \{var implementors = \{\};"));
+
+ for implementor in all_implementors.iter() {
+ try!(writeln!(&mut f, "{}", *implementor));
+ }
+
+ try!(write!(&mut f, r"implementors['{}'] = \{", krate.name));
+ for imp in imps.iter() {
+ let &(ref path, item_type) = match *imp {
+ PathType(clean::ResolvedPath { did, .. }) => {
+ cache.paths.get(&did)
+ }
+ PathType(..) | OtherType(..) => continue,
+ };
+ try!(write!(&mut f, r#"{}:"#, *path.get(path.len() - 1)));
+ try!(write!(&mut f, r#""{}"#,
+ path.slice_to(path.len() - 1).connect("/")));
+ try!(write!(&mut f, r#"/{}.{}.html","#,
+ item_type.to_static_str(),
+ *path.get(path.len() - 1)));
+ }
+ try!(writeln!(&mut f, r"\};"));
+ try!(writeln!(&mut f, "{}", r"
+ if (window.register_implementors) {
+ window.register_implementors(implementors);
+ } else {
+ window.pending_implementors = implementors;
+ }
+ "));
+ try!(writeln!(&mut f, r"\})()"));
+ }
}
// Render all source files (this may turn into a giant no-op)
match i.trait_ {
// FIXME: this is_local() check seems to be losing
// information
- Some(clean::ResolvedPath{ did, .. })
- if ast_util::is_local(did) =>
- {
- let id = did.node;
- let v = self.implementors.find_or_insert_with(id, |_|{
- Vec::new()
- });
+ Some(clean::ResolvedPath{ did, .. }) => {
+ let v = if ast_util::is_local(did) {
+ self.implementors.find_or_insert(did.node, Vec::new())
+ } else {
+ self.foreign_implementors.find_or_insert(did,
+ Vec::new())
+ };
match i.for_ {
clean::ResolvedPath{..} => {
v.unshift(PathType(i.for_.clone()));
}
clean::FunctionItem(ref f) | clean::ForeignFunctionItem(ref f) =>
item_function(fmt, self.item, f),
- clean::TraitItem(ref t) => item_trait(fmt, self.item, t),
+ clean::TraitItem(ref t) => item_trait(fmt, self.cx, self.item, t),
clean::StructItem(ref s) => item_struct(fmt, self.item, s),
clean::EnumItem(ref e) => item_enum(fmt, self.item, e),
clean::TypedefItem(ref t) => item_typedef(fmt, self.item, t),
document(w, it)
}
-fn item_trait(w: &mut fmt::Formatter, it: &clean::Item,
+fn item_trait(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
t: &clean::Trait) -> fmt::Result {
let mut parents = StrBuf::new();
if t.parents.len() > 0 {
Some(implementors) => {
try!(write!(w, "
<h2 id='implementors'>Implementors</h2>
- <ul class='item-list'>
+ <ul class='item-list' id='implementors-list'>
"));
for i in implementors.iter() {
match *i {
}
}
try!(write!(w, "</ul>"));
+ try!(write!(w, r#"<script type="text/javascript" async
+ src="{}/implementors/{}/{}.{}.js"></script>"#,
+ cx.current.iter().map(|_| "..")
+ .collect::<Vec<&str>>().connect("/"),
+ cx.current.connect("/"),
+ shortty(it).to_static_str(),
+ *it.name.get_ref()));
}
None => {}
}
}
window.initSearch = initSearch;
+
+ window.register_implementors = function(imp) {
+ var list = $('#implementors-list');
+ var libs = Object.getOwnPropertyNames(imp);
+ for (var i = 0; i < libs.length; i++) {
+ var structs = Object.getOwnPropertyNames(imp[libs[i]]);
+ for (var j = 0; j < structs.length; j++) {
+ var path = rootPath + imp[libs[i]][structs[j]];
+ var klass = path.contains("type.") ? "type" : "struct";
+ var link = $('<a>').text(structs[j])
+ .attr('href', path)
+ .attr('class', klass);
+ var code = $('<code>').append(link);
+ var li = $('<li>').append(code);
+ list.append(li);
+ }
+ }
+ };
+ if (window.pending_implementors) {
+ window.register_implementors(window.pending_implementors);
+ }
}());
/// It is assumed that all operations on this struct happen on the same thread
/// (the uv event loop).
+use alloc::arc::Arc;
use std::mem;
use std::rt::local::Local;
use std::rt::task::{BlockedTask, Task};
-use std::sync::arc::UnsafeArc;
+use std::ty::Unsafe;
use homing::HomingMissile;
pub struct Access {
- inner: UnsafeArc<Inner>,
+ inner: Arc<Unsafe<Inner>>,
}
pub struct Guard<'a> {
impl Access {
pub fn new() -> Access {
Access {
- inner: UnsafeArc::new(Inner {
+ inner: Arc::new(Unsafe::new(Inner {
queue: vec![],
held: false,
closed: false,
- })
+ }))
}
}
#[cfg(test)] extern crate green;
#[cfg(test)] extern crate realrustuv = "rustuv";
extern crate libc;
+extern crate alloc;
use libc::{c_int, c_void};
use std::fmt;
#![allow(dead_code)]
+use alloc::arc::Arc;
use libc::c_void;
use std::mem;
use std::rt::task::BlockedTask;
-use std::sync::arc::UnsafeArc;
use std::unstable::mutex::NativeMutex;
use mpsc = std::sync::mpsc_queue;
/// This structure is intended to be stored next to the event loop, and it is
/// used to create new `Queue` structures.
pub struct QueuePool {
- queue: UnsafeArc<State>,
+ queue: Arc<State>,
refcnt: uint,
}
/// This type is used to send messages back to the original event loop.
pub struct Queue {
- queue: UnsafeArc<State>,
+ queue: Arc<State>,
}
extern fn async_cb(handle: *uvll::uv_async_t) {
let pool: &mut QueuePool = unsafe {
mem::transmute(uvll::get_data_for_uv_handle(handle))
};
- let state: &mut State = unsafe { mem::transmute(pool.queue.get()) };
+ let state: &State = &*pool.queue;
// Remember that there is no guarantee about how many times an async
// callback is called with relation to the number of sends, so process the
impl QueuePool {
pub fn new(loop_: &mut Loop) -> Box<QueuePool> {
let handle = UvHandle::alloc(None::<AsyncWatcher>, uvll::UV_ASYNC);
- let state = UnsafeArc::new(State {
+ let state = Arc::new(State {
handle: handle,
lock: unsafe {NativeMutex::new()},
queue: mpsc::Queue::new(),
pub fn queue(&mut self) -> Queue {
unsafe {
if self.refcnt == 0 {
- uvll::uv_ref((*self.queue.get()).handle);
+ uvll::uv_ref(self.queue.handle);
}
self.refcnt += 1;
}
Queue { queue: self.queue.clone() }
}
- pub fn handle(&self) -> *uvll::uv_async_t {
- unsafe { (*self.queue.get()).handle }
- }
+ pub fn handle(&self) -> *uvll::uv_async_t { self.queue.handle }
}
impl Queue {
pub fn push(&mut self, task: BlockedTask) {
- unsafe {
- (*self.queue.get()).queue.push(Task(task));
- uvll::uv_async_send((*self.queue.get()).handle);
- }
+ self.queue.queue.push(Task(task));
+ unsafe { uvll::uv_async_send(self.queue.handle); }
}
}
// that the count is at least one (because we have a queue right here),
// and if the queue is dropped later on it'll see the increment for the
// decrement anyway.
- unsafe {
- (*self.queue.get()).queue.push(Increment);
- }
+ self.queue.queue.push(Increment);
Queue { queue: self.queue.clone() }
}
}
// See the comments in the async_cb function for why there is a lock
// that is acquired only on a drop.
unsafe {
- let state = self.queue.get();
- let _l = (*state).lock.lock();
- (*state).queue.push(Decrement);
- uvll::uv_async_send((*state).handle);
+ let _l = self.queue.lock.lock();
+ self.queue.queue.push(Decrement);
+ uvll::uv_async_send(self.queue.handle);
}
}
}
/// the same underlying uv object, hence Rc is not used and this simple counter
/// should suffice.
-use std::sync::arc::UnsafeArc;
+use alloc::arc::Arc;
+use std::ty::Unsafe;
pub struct Refcount {
- rc: UnsafeArc<uint>,
+ rc: Arc<Unsafe<uint>>,
}
impl Refcount {
/// Creates a new refcount of 1
pub fn new() -> Refcount {
- Refcount { rc: UnsafeArc::new(1) }
+ Refcount { rc: Arc::new(Unsafe::new(1)) }
}
fn increment(&self) {
// And now that you've seen all the races that I found and attempted to fix,
// here's the code for you to find some more!
+use alloc::arc::Arc;
+
use cell::Cell;
use clone::Clone;
use iter::Iterator;
use result::{Ok, Err, Result};
use rt::local::Local;
use rt::task::{Task, BlockedTask};
-use sync::arc::UnsafeArc;
use ty::Unsafe;
pub use comm::select::{Select, Handle};
/// The sending-half of Rust's synchronous channel type. This half can only be
/// owned by one task, but it can be cloned to send to other tasks.
pub struct SyncSender<T> {
- inner: UnsafeArc<sync::Packet<T>>,
+ inner: Arc<Unsafe<sync::Packet<T>>>,
// can't share in an arc
marker: marker::NoShare,
}
}
enum Flavor<T> {
- Oneshot(UnsafeArc<oneshot::Packet<T>>),
- Stream(UnsafeArc<stream::Packet<T>>),
- Shared(UnsafeArc<shared::Packet<T>>),
- Sync(UnsafeArc<sync::Packet<T>>),
+ Oneshot(Arc<Unsafe<oneshot::Packet<T>>>),
+ Stream(Arc<Unsafe<stream::Packet<T>>>),
+ Shared(Arc<Unsafe<shared::Packet<T>>>),
+ Sync(Arc<Unsafe<sync::Packet<T>>>),
}
#[doc(hidden)]
/// println!("{}", rx.recv());
/// ```
pub fn channel<T: Send>() -> (Sender<T>, Receiver<T>) {
- let (a, b) = UnsafeArc::new2(oneshot::Packet::new());
- (Sender::new(Oneshot(b)), Receiver::new(Oneshot(a)))
+ let a = Arc::new(Unsafe::new(oneshot::Packet::new()));
+ (Sender::new(Oneshot(a.clone())), Receiver::new(Oneshot(a)))
}
/// Creates a new synchronous, bounded channel.
/// assert_eq!(rx.recv(), 2);
/// ```
pub fn sync_channel<T: Send>(bound: uint) -> (SyncSender<T>, Receiver<T>) {
- let (a, b) = UnsafeArc::new2(sync::Packet::new(bound));
- (SyncSender::new(a), Receiver::new(Sync(b)))
+ let a = Arc::new(Unsafe::new(sync::Packet::new(bound)));
+ (SyncSender::new(a.clone()), Receiver::new(Sync(a)))
}
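Nothing changes for users of the constructors; a usage sketch matching the doc examples above:

```rust
let (tx, rx) = sync_channel(1);
tx.send(1);                // fits in the bounded buffer
// a second send would block until the receiver makes room
assert_eq!(rx.recv(), 1);
```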
////////////////////////////////////////////////////////////////////////////////
let (new_inner, ret) = match *unsafe { self.inner() } {
Oneshot(ref p) => {
- let p = p.get();
unsafe {
+ let p = p.get();
if !(*p).sent() {
return (*p).send(t);
} else {
- let (a, b) = UnsafeArc::new2(stream::Packet::new());
- match (*p).upgrade(Receiver::new(Stream(b))) {
+ let a = Arc::new(Unsafe::new(stream::Packet::new()));
+ match (*p).upgrade(Receiver::new(Stream(a.clone()))) {
oneshot::UpSuccess => {
let ret = (*a.get()).send(t);
(a, ret)
fn clone(&self) -> Sender<T> {
let (packet, sleeper) = match *unsafe { self.inner() } {
Oneshot(ref p) => {
- let (a, b) = UnsafeArc::new2(shared::Packet::new());
- match unsafe { (*p.get()).upgrade(Receiver::new(Shared(a))) } {
- oneshot::UpSuccess | oneshot::UpDisconnected => (b, None),
- oneshot::UpWoke(task) => (b, Some(task))
+ let a = Arc::new(Unsafe::new(shared::Packet::new()));
+ match unsafe {
+ (*p.get()).upgrade(Receiver::new(Shared(a.clone())))
+ } {
+ oneshot::UpSuccess | oneshot::UpDisconnected => (a, None),
+ oneshot::UpWoke(task) => (a, Some(task))
}
}
Stream(ref p) => {
- let (a, b) = UnsafeArc::new2(shared::Packet::new());
- match unsafe { (*p.get()).upgrade(Receiver::new(Shared(a))) } {
- stream::UpSuccess | stream::UpDisconnected => (b, None),
- stream::UpWoke(task) => (b, Some(task)),
+ let a = Arc::new(Unsafe::new(shared::Packet::new()));
+ match unsafe {
+ (*p.get()).upgrade(Receiver::new(Shared(a.clone())))
+ } {
+ stream::UpSuccess | stream::UpDisconnected => (a, None),
+ stream::UpWoke(task) => (a, Some(task)),
}
}
Shared(ref p) => {
////////////////////////////////////////////////////////////////////////////////
impl<T: Send> SyncSender<T> {
- fn new(inner: UnsafeArc<sync::Packet<T>>) -> SyncSender<T> {
+ fn new(inner: Arc<Unsafe<sync::Packet<T>>>) -> SyncSender<T> {
SyncSender { inner: inner, marker: marker::NoShare }
}
/// this type is to have one and exactly one allocation when the chan/port pair
/// is created.
///
-/// Another possible optimization would be to not use an UnsafeArc box because
+/// Another possible optimization would be to not use an Arc box because
/// in theory we know when the shared packet can be deallocated (no real need
/// for the atomic reference counting), but I was having trouble figuring out
/// how to destroy the data early in a drop of a Port.
pub use tuple::{Tuple1, Tuple2, Tuple3, Tuple4};
pub use tuple::{Tuple5, Tuple6, Tuple7, Tuple8};
pub use tuple::{Tuple9, Tuple10, Tuple11, Tuple12};
-pub use slice::{ImmutableEqVector, ImmutableTotalOrdVector, ImmutableCloneableVector};
-pub use slice::{OwnedVector};
-pub use slice::{MutableVector, MutableTotalOrdVector, MutableVectorAllocating};
-pub use slice::{Vector, VectorVector, CloneableVector, ImmutableVector};
+pub use slice::{CloneableVector, ImmutableCloneableVector, MutableCloneableVector};
+pub use slice::{ImmutableVector, MutableVector};
+pub use slice::{ImmutableEqVector, ImmutableTotalOrdVector, MutableTotalOrdVector};
+pub use slice::{Vector, VectorVector, OwnedVector, MutableVectorAllocating};
pub use strbuf::StrBuf;
pub use vec::Vec;
//! local storage, and logging. Even a 'freestanding' Rust would likely want
//! to implement this.
+use alloc::arc::Arc;
+
use cleanup;
use clone::Clone;
use comm::Sender;
use rt::rtio::LocalIo;
use rt::unwind::Unwinder;
use str::SendStr;
-use sync::arc::UnsafeArc;
use sync::atomics::{AtomicUint, SeqCst};
use task::{TaskResult, TaskOpts};
use unstable::finally::Finally;
/// at any time.
pub enum BlockedTask {
Owned(Box<Task>),
- Shared(UnsafeArc<AtomicUint>),
+ Shared(Arc<AtomicUint>),
}
pub enum DeathAction {
}
pub struct BlockedTasks {
- inner: UnsafeArc<AtomicUint>,
+ inner: Arc<AtomicUint>,
}
impl Task {
pub fn wake(self) -> Option<Box<Task>> {
match self {
Owned(task) => Some(task),
- Shared(arc) => unsafe {
- match (*arc.get()).swap(0, SeqCst) {
+ Shared(arc) => {
+ match arc.swap(0, SeqCst) {
0 => None,
- n => Some(mem::transmute(n)),
+ n => Some(unsafe { mem::transmute(n) }),
}
}
}
let arc = match self {
Owned(task) => {
let flag = unsafe { AtomicUint::new(mem::transmute(task)) };
- UnsafeArc::new(flag)
+ Arc::new(flag)
}
Shared(arc) => arc.clone(),
};
if blocked_task_ptr & 0x1 == 0 {
Owned(mem::transmute(blocked_task_ptr))
} else {
- let ptr: Box<UnsafeArc<AtomicUint>> =
+ let ptr: Box<Arc<AtomicUint>> =
mem::transmute(blocked_task_ptr & !1);
Shared(*ptr)
}
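The low bit serves as a variant tag: heap pointers are at least word-aligned, so bit 0 is always free. A sketch of the encoding with a made-up address:

```rust
let addr: uint = 0x1000;       // hypothetical word-aligned Box/Arc pointer
let tagged = addr | 1;         // Shared: low bit set
assert!(addr & 0x1 == 0);      // Owned: low bit clear
assert_eq!(tagged & !1, addr); // masking recovers the real pointer
```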
use option::{None, Option, Some};
use ptr::RawPtr;
use ptr;
-use rt::heap::{exchange_malloc, deallocate};
+use rt::heap::{allocate, deallocate};
use unstable::finally::try_finally;
use vec::Vec;
unsafe {
// this should pass the real required alignment
- let ret = exchange_malloc(size, 8) as *mut RawVec<()>;
+ let ret = allocate(size, 8) as *mut RawVec<()>;
let a_size = mem::size_of::<T>();
let a_size = if a_size == 0 {1} else {a_size};
assert_eq!(v_b[0], 2);
assert_eq!(v_b[1], 3);
- // Test on exchange heap.
+ // Test `Box<[T]>`
let vec_unique = box [1, 2, 3, 4, 5, 6];
let v_d = vec_unique.slice(1u, 6u).to_owned();
assert_eq!(v_d.len(), 5u);
+++ /dev/null
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Atomically reference counted data
-//!
-//! This modules contains the implementation of an atomically reference counted
-//! pointer for the purpose of sharing data between tasks. This is obviously a
-//! very unsafe primitive to use, but it has its use cases when implementing
-//! concurrent data structures and similar tasks.
-//!
-//! Great care must be taken to ensure that data races do not arise through the
-//! usage of `UnsafeArc`, and this often requires some form of external
-//! synchronization. The only guarantee provided to you by this class is that
-//! the underlying data will remain valid (not free'd) so long as the reference
-//! count is greater than one.
-
-use clone::Clone;
-use iter::Iterator;
-use kinds::Send;
-use mem;
-use ops::Drop;
-use owned::Box;
-use ptr::RawPtr;
-use sync::atomics::{fence, AtomicUint, Relaxed, Acquire, Release};
-use ty::Unsafe;
-use vec::Vec;
-
-/// An atomically reference counted pointer.
-///
-/// Enforces no shared-memory safety.
-#[unsafe_no_drop_flag]
-pub struct UnsafeArc<T> {
- data: *mut ArcData<T>,
-}
-
-struct ArcData<T> {
- count: AtomicUint,
- data: Unsafe<T>,
-}
-
-unsafe fn new_inner<T: Send>(data: T, refcount: uint) -> *mut ArcData<T> {
- let data = box ArcData {
- count: AtomicUint::new(refcount),
- data: Unsafe::new(data)
- };
- mem::transmute(data)
-}
-
-impl<T: Send> UnsafeArc<T> {
- /// Creates a new `UnsafeArc` which wraps the given data.
- pub fn new(data: T) -> UnsafeArc<T> {
- unsafe { UnsafeArc { data: new_inner(data, 1) } }
- }
-
- /// As new(), but returns an extra pre-cloned handle.
- pub fn new2(data: T) -> (UnsafeArc<T>, UnsafeArc<T>) {
- unsafe {
- let ptr = new_inner(data, 2);
- (UnsafeArc { data: ptr }, UnsafeArc { data: ptr })
- }
- }
-
- /// As new(), but returns a vector of as many pre-cloned handles as
- /// requested.
- pub fn newN(data: T, num_handles: uint) -> Vec<UnsafeArc<T>> {
- unsafe {
- if num_handles == 0 {
- vec![] // need to free data here
- } else {
- let ptr = new_inner(data, num_handles);
- let v = Vec::from_fn(num_handles, |_| UnsafeArc { data: ptr });
- v
- }
- }
- }
-
- /// Gets a pointer to the inner shared data. Note that care must be taken to
- /// ensure that the outer `UnsafeArc` does not fall out of scope while this
- /// pointer is in use, otherwise it could possibly contain a use-after-free.
- #[inline]
- pub fn get(&self) -> *mut T {
- unsafe {
- debug_assert!((*self.data).count.load(Relaxed) > 0);
- return (*self.data).data.get();
- }
- }
-
- /// Gets an immutable pointer to the inner shared data. This has the same
- /// caveats as the `get` method.
- #[inline]
- pub fn get_immut(&self) -> *T {
- unsafe {
- debug_assert!((*self.data).count.load(Relaxed) > 0);
- return (*self.data).data.get() as *T;
- }
- }
-
- /// checks if this is the only reference to the arc protected data
- #[inline]
- pub fn is_owned(&self) -> bool {
- unsafe {
- (*self.data).count.load(Relaxed) == 1
- }
- }
-}
-
-impl<T: Send> Clone for UnsafeArc<T> {
- fn clone(&self) -> UnsafeArc<T> {
- unsafe {
- // Using a relaxed ordering is alright here, as knowledge of the original reference
- // prevents other threads from erroneously deleting the object.
- //
- // As explained in the [Boost documentation][1],
- // Increasing the reference counter can always be done with memory_order_relaxed: New
- // references to an object can only be formed from an existing reference, and passing
- // an existing reference from one thread to another must already provide any required
- // synchronization.
- // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
- let old_count = (*self.data).count.fetch_add(1, Relaxed);
- debug_assert!(old_count >= 1);
- return UnsafeArc { data: self.data };
- }
- }
-}
-
-#[unsafe_destructor]
-impl<T> Drop for UnsafeArc<T>{
- fn drop(&mut self) {
- unsafe {
- // Happens when destructing an unwrapper's handle and from
- // `#[unsafe_no_drop_flag]`
- if self.data.is_null() {
- return
- }
- // Because `fetch_sub` is already atomic, we do not need to synchronize with other
- // threads unless we are going to delete the object.
- let old_count = (*self.data).count.fetch_sub(1, Release);
- debug_assert!(old_count >= 1);
- if old_count == 1 {
- // This fence is needed to prevent reordering of use of the data and deletion of
- // the data. Because it is marked `Release`, the decreasing of the reference count
- // synchronizes with this `Acquire` fence. This means that use of the data happens
- // before decreasing the reference count, which happens before this fence, which
- // happens before the deletion of the data.
- //
- // As explained in the [Boost documentation][1],
- // It is important to enforce any possible access to the object in one thread
- // (through an existing reference) to *happen before* deleting the object in a
- // different thread. This is achieved by a "release" operation after dropping a
- // reference (any access to the object through this reference must obviously
- // happened before), and an "acquire" operation before deleting the object.
- // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
- fence(Acquire);
- let _: Box<ArcData<T>> = mem::transmute(self.data);
- }
- }
- }
-}
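The decrement half, under the same sketch assumptions, pairs the Release decrement with an Acquire fence exactly as the comment above describes; the return value tells the caller whether it is now the sole owner and may free the data:

    impl Count {
        fn release(&self) -> bool {
            // Release: all prior uses of the data happen-before the count hits 0.
            if self.refs.fetch_sub(1, Release) == 1 {
                // Acquire: the deletion happens-after every such decrement.
                fence(Acquire);
                true
            } else {
                false
            }
        }
    }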
-
-#[cfg(test)]
-mod tests {
- use prelude::*;
- use super::UnsafeArc;
- use mem::size_of;
-
- #[test]
- fn test_size() {
- assert_eq!(size_of::<UnsafeArc<[int, ..10]>>(), size_of::<*[int, ..10]>());
- }
-
- #[test]
- fn arclike_newN() {
- // Tests that the many-refcounts-at-once constructors don't leak.
- let _ = UnsafeArc::new2("hello".to_owned().to_owned());
- let x = UnsafeArc::newN("hello".to_owned().to_owned(), 0);
- assert_eq!(x.len(), 0);
- let x = UnsafeArc::newN("hello".to_owned().to_owned(), 1);
- assert_eq!(x.len(), 1);
- let x = UnsafeArc::newN("hello".to_owned().to_owned(), 10);
- assert_eq!(x.len(), 10);
- }
-}
// FIXME: all atomic operations in this module use a SeqCst ordering. That is
// probably overkill
+use alloc::arc::Arc;
+
use clone::Clone;
use iter::{range, Iterator};
use kinds::Send;
+use kinds::marker;
use mem::{forget, min_align_of, size_of, transmute};
use ops::Drop;
use option::{Option, Some, None};
use owned::Box;
use ptr::RawPtr;
use ptr;
+use rt::heap::{allocate, deallocate};
use slice::ImmutableVector;
-use sync::arc::UnsafeArc;
use sync::atomics::{AtomicInt, AtomicPtr, SeqCst};
use unstable::sync::Exclusive;
-use rt::heap::{allocate, deallocate};
use vec::Vec;
// Once the queue is less than 1/K full, it will be downsized. Note that
///
/// There may only be one worker per deque.
pub struct Worker<T> {
- deque: UnsafeArc<Deque<T>>,
+ deque: Arc<Deque<T>>,
+ noshare: marker::NoShare,
}
/// The stealing half of the work-stealing deque. Stealers have access to the
/// opposite end of the deque from the worker, and they only have access to the
/// `steal` method.
pub struct Stealer<T> {
- deque: UnsafeArc<Deque<T>>,
+ deque: Arc<Deque<T>>,
+ noshare: marker::NoShare,
}
/// When stealing some data, this is an enumeration of the possible outcomes.
/// Allocates a new work-stealing deque which will send and receive memory
/// to and from this buffer pool.
- pub fn deque(&mut self) -> (Worker<T>, Stealer<T>) {
- let (a, b) = UnsafeArc::new2(Deque::new(self.clone()));
- (Worker { deque: a }, Stealer { deque: b })
+ pub fn deque(&self) -> (Worker<T>, Stealer<T>) {
+ let a = Arc::new(Deque::new(self.clone()));
+ let b = a.clone();
+ (Worker { deque: a, noshare: marker::NoShare },
+ Stealer { deque: b, noshare: marker::NoShare })
}
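Since both halves now hold an `Arc` and every method takes `&self`, callers no longer need `mut` bindings. A usage sketch mirroring the updated tests below (the element type `int` is arbitrary); the `pool()` accessors likewise return `&BufferPool<T>`, so further deques sharing the pool can be created through either half:

    let pool = BufferPool::<int>::new();
    let (w, s) = pool.deque();
    w.push(1);
    match s.steal() {
        Data(i) => assert_eq!(i, 1),
        _ => {} // Empty, or a steal that lost a race and should retry
    }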
- fn alloc(&mut self, bits: int) -> Box<Buffer<T>> {
+ fn alloc(&self, bits: int) -> Box<Buffer<T>> {
unsafe {
self.pool.with(|pool| {
match pool.iter().position(|x| x.size() >= (1 << bits)) {
}
}
- fn free(&mut self, buf: Box<Buffer<T>>) {
+ fn free(&self, buf: Box<Buffer<T>>) {
unsafe {
let mut buf = Some(buf);
self.pool.with(|pool| {
impl<T: Send> Worker<T> {
/// Pushes data onto the front of this work queue.
- pub fn push(&mut self, t: T) {
- unsafe { (*self.deque.get()).push(t) }
+ pub fn push(&self, t: T) {
+ unsafe { self.deque.push(t) }
}
/// Pops data off the front of the work queue, returning `None` on an empty
/// queue.
- pub fn pop(&mut self) -> Option<T> {
- unsafe { (*self.deque.get()).pop() }
+ pub fn pop(&self) -> Option<T> {
+ unsafe { self.deque.pop() }
}
/// Gets access to the buffer pool that this worker is attached to. This can
/// be used to create more deques which share the same buffer pool as this
/// deque.
- pub fn pool<'a>(&'a mut self) -> &'a mut BufferPool<T> {
- unsafe { &mut (*self.deque.get()).pool }
+ pub fn pool<'a>(&'a self) -> &'a BufferPool<T> {
+ &self.deque.pool
}
}
impl<T: Send> Stealer<T> {
/// Steals work off the end of the queue (opposite of the worker's end)
- pub fn steal(&mut self) -> Stolen<T> {
- unsafe { (*self.deque.get()).steal() }
+ pub fn steal(&self) -> Stolen<T> {
+ unsafe { self.deque.steal() }
}
/// Gets access to the buffer pool that this stealer is attached to. This
/// can be used to create more deques which share the same buffer pool as
/// this deque.
- pub fn pool<'a>(&'a mut self) -> &'a mut BufferPool<T> {
- unsafe { &mut (*self.deque.get()).pool }
+ pub fn pool<'a>(&'a self) -> &'a BufferPool<T> {
+ &self.deque.pool
}
}
impl<T: Send> Clone for Stealer<T> {
- fn clone(&self) -> Stealer<T> { Stealer { deque: self.deque.clone() } }
+ fn clone(&self) -> Stealer<T> {
+ Stealer { deque: self.deque.clone(), noshare: marker::NoShare }
+ }
}
// Almost all of this code can be found directly in the paper so I'm not
// personally going to heavily comment what's going on here.
impl<T: Send> Deque<T> {
- fn new(mut pool: BufferPool<T>) -> Deque<T> {
+ fn new(pool: BufferPool<T>) -> Deque<T> {
let buf = pool.alloc(MIN_BITS);
Deque {
bottom: AtomicInt::new(0),
}
}
- unsafe fn push(&mut self, data: T) {
+ unsafe fn push(&self, data: T) {
let mut b = self.bottom.load(SeqCst);
let t = self.top.load(SeqCst);
let mut a = self.array.load(SeqCst);
self.bottom.store(b + 1, SeqCst);
}
- unsafe fn pop(&mut self) -> Option<T> {
+ unsafe fn pop(&self) -> Option<T> {
let b = self.bottom.load(SeqCst);
let a = self.array.load(SeqCst);
let b = b - 1;
}
}
- unsafe fn steal(&mut self) -> Stolen<T> {
+ unsafe fn steal(&self) -> Stolen<T> {
let t = self.top.load(SeqCst);
let old = self.array.load(SeqCst);
let b = self.bottom.load(SeqCst);
}
}
- unsafe fn maybe_shrink(&mut self, b: int, t: int) {
+ unsafe fn maybe_shrink(&self, b: int, t: int) {
let a = self.array.load(SeqCst);
if b - t < (*a).size() / K && b - t > (1 << MIN_BITS) {
self.swap_buffer(b, a, (*a).resize(b, t, -1));
// after this method has called 'free' on it. The continued usage is simply
// a read followed by a forget, but we must make sure that the memory can
// continue to be read after we flag this buffer for reclamation.
- unsafe fn swap_buffer(&mut self, b: int, old: *mut Buffer<T>,
+ unsafe fn swap_buffer(&self, b: int, old: *mut Buffer<T>,
buf: Buffer<T>) -> *mut Buffer<T> {
let newbuf: *mut Buffer<T> = transmute(box buf);
self.array.store(newbuf, SeqCst);
// Unsafe because this unsafely overwrites possibly uninitialized or
// initialized data.
- unsafe fn put(&mut self, i: int, t: T) {
+ unsafe fn put(&self, i: int, t: T) {
let ptr = self.storage.offset(i & self.mask());
ptr::copy_nonoverlapping_memory(ptr as *mut T, &t as *T, 1);
forget(t);
// Again, unsafe because this has incredibly dubious ownership violations.
// It is assumed that this buffer is immediately dropped.
unsafe fn resize(&self, b: int, t: int, delta: int) -> Buffer<T> {
- let mut buf = Buffer::new(self.log_size + delta);
+ let buf = Buffer::new(self.log_size + delta);
for i in range(t, b) {
buf.put(i, self.get(i));
}
#[test]
fn smoke() {
- let mut pool = BufferPool::new();
- let (mut w, mut s) = pool.deque();
+ let pool = BufferPool::new();
+ let (w, s) = pool.deque();
assert_eq!(w.pop(), None);
assert_eq!(s.steal(), Empty);
w.push(1);
#[test]
fn stealpush() {
static AMT: int = 100000;
- let mut pool = BufferPool::<int>::new();
- let (mut w, s) = pool.deque();
+ let pool = BufferPool::<int>::new();
+ let (w, s) = pool.deque();
let t = Thread::start(proc() {
- let mut s = s;
let mut left = AMT;
while left > 0 {
match s.steal() {
#[test]
fn stealpush_large() {
static AMT: int = 100000;
- let mut pool = BufferPool::<(int, int)>::new();
- let (mut w, s) = pool.deque();
+ let pool = BufferPool::<(int, int)>::new();
+ let (w, s) = pool.deque();
let t = Thread::start(proc() {
- let mut s = s;
let mut left = AMT;
while left > 0 {
match s.steal() {
t.join();
}
- fn stampede(mut w: Worker<Box<int>>, s: Stealer<Box<int>>,
+ fn stampede(w: Worker<Box<int>>, s: Stealer<Box<int>>,
nthreads: int, amt: uint) {
for _ in range(0, amt) {
w.push(box 20);
let s = s.clone();
Thread::start(proc() {
unsafe {
- let mut s = s;
while (*unsafe_remaining).load(SeqCst) > 0 {
match s.steal() {
Data(box 20) => {
#[test]
fn run_stampede() {
- let mut pool = BufferPool::<Box<int>>::new();
+ let pool = BufferPool::<Box<int>>::new();
let (w, s) = pool.deque();
stampede(w, s, 8, 10000);
}
#[test]
fn many_stampede() {
static AMT: uint = 4;
- let mut pool = BufferPool::<Box<int>>::new();
+ let pool = BufferPool::<Box<int>>::new();
let threads = range(0, AMT).map(|_| {
let (w, s) = pool.deque();
Thread::start(proc() {
static NTHREADS: int = 8;
static mut DONE: AtomicBool = INIT_ATOMIC_BOOL;
static mut HITS: AtomicUint = INIT_ATOMIC_UINT;
- let mut pool = BufferPool::<int>::new();
- let (mut w, s) = pool.deque();
+ let pool = BufferPool::<int>::new();
+ let (w, s) = pool.deque();
let threads = range(0, NTHREADS).map(|_| {
let s = s.clone();
Thread::start(proc() {
unsafe {
- let mut s = s;
loop {
match s.steal() {
Data(2) => { HITS.fetch_add(1, SeqCst); }
static AMT: int = 10000;
static NTHREADS: int = 4;
static mut DONE: AtomicBool = INIT_ATOMIC_BOOL;
- let mut pool = BufferPool::<(int, uint)>::new();
- let (mut w, s) = pool.deque();
+ let pool = BufferPool::<(int, uint)>::new();
+ let (w, s) = pool.deque();
let (threads, hits) = vec::unzip(range(0, NTHREADS).map(|_| {
let s = s.clone();
};
(Thread::start(proc() {
unsafe {
- let mut s = s;
loop {
match s.steal() {
Data((1, 2)) => {
//! and/or blocking at all, but rather provide the necessary tools to build
//! other types of concurrent primitives.
-pub mod arc;
pub mod atomics;
pub mod deque;
pub mod mpmc_bounded_queue;
// http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
+use alloc::arc::Arc;
+
use clone::Clone;
use kinds::Send;
use num::next_power_of_two;
use option::{Option, Some, None};
-use sync::arc::UnsafeArc;
use sync::atomics::{AtomicUint,Relaxed,Release,Acquire};
use vec::Vec;
+use ty::Unsafe;
struct Node<T> {
sequence: AtomicUint,
struct State<T> {
pad0: [u8, ..64],
- buffer: Vec<Node<T>>,
+ buffer: Vec<Unsafe<Node<T>>>,
mask: uint,
pad1: [u8, ..64],
enqueue_pos: AtomicUint,
}
pub struct Queue<T> {
- state: UnsafeArc<State<T>>,
+ state: Arc<State<T>>,
}
impl<T: Send> State<T> {
capacity
};
let buffer = Vec::from_fn(capacity, |i| {
- Node { sequence:AtomicUint::new(i), value: None }
+ Unsafe::new(Node { sequence:AtomicUint::new(i), value: None })
});
State{
pad0: [0, ..64],
}
}
- fn push(&mut self, value: T) -> bool {
+ fn push(&self, value: T) -> bool {
let mask = self.mask;
let mut pos = self.enqueue_pos.load(Relaxed);
loop {
- let node = self.buffer.get_mut(pos & mask);
- let seq = node.sequence.load(Acquire);
+ let node = self.buffer.get(pos & mask);
+ let seq = unsafe { (*node.get()).sequence.load(Acquire) };
let diff: int = seq as int - pos as int;
if diff == 0 {
let enqueue_pos = self.enqueue_pos.compare_and_swap(pos, pos+1, Relaxed);
if enqueue_pos == pos {
- node.value = Some(value);
- node.sequence.store(pos+1, Release);
+ unsafe {
+ (*node.get()).value = Some(value);
+ (*node.get()).sequence.store(pos+1, Release);
+ }
break
} else {
pos = enqueue_pos;
true
}
- fn pop(&mut self) -> Option<T> {
+ fn pop(&self) -> Option<T> {
let mask = self.mask;
let mut pos = self.dequeue_pos.load(Relaxed);
loop {
- let node = self.buffer.get_mut(pos & mask);
- let seq = node.sequence.load(Acquire);
+ let node = self.buffer.get(pos & mask);
+ let seq = unsafe { (*node.get()).sequence.load(Acquire) };
let diff: int = seq as int - (pos + 1) as int;
if diff == 0 {
let dequeue_pos = self.dequeue_pos.compare_and_swap(pos, pos+1, Relaxed);
if dequeue_pos == pos {
- let value = node.value.take();
- node.sequence.store(pos + mask + 1, Release);
- return value
+ unsafe {
+ let value = (*node.get()).value.take();
+ (*node.get()).sequence.store(pos + mask + 1, Release);
+ return value
+ }
} else {
pos = dequeue_pos;
}
impl<T: Send> Queue<T> {
pub fn with_capacity(capacity: uint) -> Queue<T> {
Queue{
- state: UnsafeArc::new(State::with_capacity(capacity))
+ state: Arc::new(State::with_capacity(capacity))
}
}
- pub fn push(&mut self, value: T) -> bool {
- unsafe { (*self.state.get()).push(value) }
+ pub fn push(&self, value: T) -> bool {
+ self.state.push(value)
}
- pub fn pop(&mut self) -> Option<T> {
- unsafe { (*self.state.get()).pop() }
+ pub fn pop(&self) -> Option<T> {
+ self.state.pop()
}
}
impl<T: Send> Clone for Queue<T> {
fn clone(&self) -> Queue<T> {
- Queue {
- state: self.state.clone()
- }
+ Queue { state: self.state.clone() }
}
}
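A usage sketch for the `Arc`-backed bounded queue; the capacity passed to `with_capacity` is rounded up internally via `next_power_of_two`, so the 2 here is illustrative. Clones share one `State<T>`, and `push` reports failure on a full buffer rather than blocking:

    let q = Queue::with_capacity(2);
    let q2 = q.clone();           // same underlying State<T>
    assert!(q.push(1));
    assert!(q.push(2));
    assert!(!q.push(3));          // buffer full
    assert_eq!(q2.pop(), Some(1));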
fn test() {
let nthreads = 8u;
let nmsgs = 1000u;
- let mut q = Queue::with_capacity(nthreads*nmsgs);
+ let q = Queue::with_capacity(nthreads*nmsgs);
assert_eq!(None, q.pop());
let (tx, rx) = channel();
let q = q.clone();
let tx = tx.clone();
native::task::spawn(proc() {
- let mut q = q;
+ let q = q;
for i in range(0, nmsgs) {
assert!(q.push(i));
}
completion_rxs.push(rx);
let q = q.clone();
native::task::spawn(proc() {
- let mut q = q;
+ let q = q;
let mut i = 0u;
loop {
match q.pop() {
use owned::Box;
use ptr::RawPtr;
use sync::atomics::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
+use ty::Unsafe;
/// A result of the `pop` function.
pub enum PopResult<T> {
/// popper at a time (many pushers are allowed).
pub struct Queue<T> {
head: AtomicPtr<Node<T>>,
- tail: *mut Node<T>,
+ tail: Unsafe<*mut Node<T>>,
}
impl<T> Node<T> {
let stub = unsafe { Node::new(None) };
Queue {
head: AtomicPtr::new(stub),
- tail: stub,
+ tail: Unsafe::new(stub),
}
}
/// Pushes a new value onto this queue.
- pub fn push(&mut self, t: T) {
+ pub fn push(&self, t: T) {
unsafe {
let n = Node::new(Some(t));
let prev = self.head.swap(n, AcqRel);
///
/// This inconsistent state means that this queue does indeed have data, but
/// it does not currently have access to it.
- pub fn pop(&mut self) -> PopResult<T> {
+ pub fn pop(&self) -> PopResult<T> {
unsafe {
- let tail = self.tail;
+ let tail = *self.tail.get();
let next = (*tail).next.load(Acquire);
if !next.is_null() {
- self.tail = next;
+ *self.tail.get() = next;
assert!((*tail).value.is_none());
assert!((*next).value.is_some());
let ret = (*next).value.take_unwrap();
/// Attempts to pop data from this queue, but doesn't attempt too hard. This
/// will canonicalize inconsistent states to a `None` value.
- pub fn casual_pop(&mut self) -> Option<T> {
+ pub fn casual_pop(&self) -> Option<T> {
match self.pop() {
Data(t) => Some(t),
Empty | Inconsistent => None,
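The `Inconsistent` result above is transient: a pusher was preempted between its two push steps, so data exists but is momentarily unreachable from the tail. A hypothetical consumer loop (sketch only; a real caller would likely yield to the scheduler rather than spin):

    fn pop_spin<T: Send>(q: &Queue<T>) -> T {
        loop {
            match q.pop() {
                Data(t) => return t,
                Inconsistent => continue, // data present; retry shortly
                Empty => continue,        // nothing queued yet
            }
        }
    }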
impl<T: Send> Drop for Queue<T> {
fn drop(&mut self) {
unsafe {
- let mut cur = self.tail;
+ let mut cur = *self.tail.get();
while !cur.is_null() {
let next = (*cur).next.load(Relaxed);
let _: Box<Node<T>> = mem::transmute(cur);
mod tests {
use prelude::*;
+ use alloc::arc::Arc;
+
use native;
use super::{Queue, Data, Empty, Inconsistent};
- use sync::arc::UnsafeArc;
#[test]
fn test_full() {
- let mut q = Queue::new();
+ let q = Queue::new();
q.push(box 1);
q.push(box 2);
}
fn test() {
let nthreads = 8u;
let nmsgs = 1000u;
- let mut q = Queue::new();
+ let q = Queue::new();
match q.pop() {
Empty => {}
Inconsistent | Data(..) => fail!()
}
let (tx, rx) = channel();
- let q = UnsafeArc::new(q);
+ let q = Arc::new(q);
for _ in range(0, nthreads) {
let tx = tx.clone();
let q = q.clone();
native::task::spawn(proc() {
for i in range(0, nmsgs) {
- unsafe { (*q.get()).push(i); }
+ q.push(i);
}
tx.send(());
});
let mut i = 0u;
while i < nthreads * nmsgs {
- match unsafe { (*q.get()).pop() } {
+ match q.pop() {
Empty | Inconsistent => {},
Data(_) => { i += 1 }
}
use owned::Box;
use ptr::RawPtr;
use sync::atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
+use ty::Unsafe;
// Node within the linked list queue of messages to send
struct Node<T> {
}
/// The single-producer single-consumer queue. This structure is not cloneable,
-/// but it can be safely shared in an UnsafeArc if it is guaranteed that there
+/// but it can be safely shared in an Arc if it is guaranteed that there
/// is only one popper and one pusher touching the queue at any one point in
/// time.
pub struct Queue<T> {
// consumer fields
- tail: *mut Node<T>, // where to pop from
+ tail: Unsafe<*mut Node<T>>, // where to pop from
tail_prev: AtomicPtr<Node<T>>, // where to pop from
// producer fields
- head: *mut Node<T>, // where to push to
- first: *mut Node<T>, // where to get new nodes from
- tail_copy: *mut Node<T>, // between first/tail
+ head: Unsafe<*mut Node<T>>, // where to push to
+ first: Unsafe<*mut Node<T>>, // where to get new nodes from
+ tail_copy: Unsafe<*mut Node<T>>, // between first/tail
// Cache maintenance fields. Additions and subtractions are stored
// separately in order to allow them to use nonatomic addition/subtraction.
let n2 = Node::new();
unsafe { (*n1).next.store(n2, Relaxed) }
Queue {
- tail: n2,
+ tail: Unsafe::new(n2),
tail_prev: AtomicPtr::new(n1),
- head: n2,
- first: n1,
- tail_copy: n1,
+ head: Unsafe::new(n2),
+ first: Unsafe::new(n1),
+ tail_copy: Unsafe::new(n1),
cache_bound: bound,
cache_additions: AtomicUint::new(0),
cache_subtractions: AtomicUint::new(0),
/// Pushes a new value onto this queue. Note that to use this function
/// safely, it must be externally guaranteed that there is only one pusher.
- pub fn push(&mut self, t: T) {
+ pub fn push(&self, t: T) {
unsafe {
// Acquire a node (which either uses a cached one or allocates a new
// one), and then append this to the 'head' node.
assert!((*n).value.is_none());
(*n).value = Some(t);
(*n).next.store(0 as *mut Node<T>, Relaxed);
- (*self.head).next.store(n, Release);
- self.head = n;
+ (**self.head.get()).next.store(n, Release);
+ *self.head.get() = n;
}
}
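As the type-level docs above now put it, the queue itself stays non-cloneable; sharing happens by wrapping it in an `Arc`, which the `&self` methods make workable. A sketch matching the `stress_bound` test further down (a bound of 0 leaves the node cache unbounded):

    let a = Arc::new(Queue::new(0));
    let b = a.clone();
    // Exactly one thread may push through one handle while exactly one
    // other thread pops through the other.
    a.push(1);
    assert_eq!(b.pop(), Some(1));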
- unsafe fn alloc(&mut self) -> *mut Node<T> {
+ unsafe fn alloc(&self) -> *mut Node<T> {
// First try to see if we can consume the 'first' node for our uses.
// We try to avoid as many atomic instructions as possible here, so
// the addition to cache_subtractions is not atomic (plus we're the
// only one subtracting from the cache).
- if self.first != self.tail_copy {
+ if *self.first.get() != *self.tail_copy.get() {
if self.cache_bound > 0 {
let b = self.cache_subtractions.load(Relaxed);
self.cache_subtractions.store(b + 1, Relaxed);
}
- let ret = self.first;
- self.first = (*ret).next.load(Relaxed);
+ let ret = *self.first.get();
+ *self.first.get() = (*ret).next.load(Relaxed);
return ret;
}
// If the above fails, then update our copy of the tail and try
// again.
- self.tail_copy = self.tail_prev.load(Acquire);
- if self.first != self.tail_copy {
+ *self.tail_copy.get() = self.tail_prev.load(Acquire);
+ if *self.first.get() != *self.tail_copy.get() {
if self.cache_bound > 0 {
let b = self.cache_subtractions.load(Relaxed);
self.cache_subtractions.store(b + 1, Relaxed);
}
- let ret = self.first;
- self.first = (*ret).next.load(Relaxed);
+ let ret = *self.first.get();
+ *self.first.get() = (*ret).next.load(Relaxed);
return ret;
}
// If all of that fails, then we have to allocate a new node
/// Attempts to pop a value from this queue. Remember that to use this type
/// safely you must ensure that there is only one popper at a time.
- pub fn pop(&mut self) -> Option<T> {
+ pub fn pop(&self) -> Option<T> {
unsafe {
// The `tail` node is not actually a used node, but rather a
// sentinel from where we should start popping from. Hence, look at
// tail's next field and see if we can use it. If we do a pop, then
// the current tail node is a candidate for going into the cache.
- let tail = self.tail;
+ let tail = *self.tail.get();
let next = (*tail).next.load(Acquire);
if next.is_null() { return None }
assert!((*next).value.is_some());
let ret = (*next).value.take();
- self.tail = next;
+ *self.tail.get() = next;
if self.cache_bound == 0 {
self.tail_prev.store(tail, Release);
} else {
/// Attempts to peek at the head of the queue, returning `None` if the queue
/// has no data currently
- pub fn peek<'a>(&'a mut self) -> Option<&'a mut T> {
+ pub fn peek<'a>(&'a self) -> Option<&'a mut T> {
// This is essentially the same as above with all the popping bits
// stripped out.
unsafe {
- let tail = self.tail;
+ let tail = *self.tail.get();
let next = (*tail).next.load(Acquire);
if next.is_null() { return None }
return (*next).value.as_mut();
impl<T: Send> Drop for Queue<T> {
fn drop(&mut self) {
unsafe {
- let mut cur = self.first;
+ let mut cur = *self.first.get();
while !cur.is_null() {
let next = (*cur).next.load(Relaxed);
let _n: Box<Node<T>> = mem::transmute(cur);
#[cfg(test)]
mod test {
use prelude::*;
+
+ use alloc::arc::Arc;
use native;
+
use super::Queue;
- use sync::arc::UnsafeArc;
#[test]
fn smoke() {
- let mut q = Queue::new(0);
+ let q = Queue::new(0);
q.push(1);
q.push(2);
assert_eq!(q.pop(), Some(1));
#[test]
fn drop_full() {
- let mut q = Queue::new(0);
+ let q = Queue::new(0);
q.push(box 1);
q.push(box 2);
}
#[test]
fn smoke_bound() {
- let mut q = Queue::new(1);
+ let q = Queue::new(1);
q.push(1);
q.push(2);
assert_eq!(q.pop(), Some(1));
stress_bound(1);
fn stress_bound(bound: uint) {
- let (a, b) = UnsafeArc::new2(Queue::new(bound));
+ let a = Arc::new(Queue::new(bound));
+ let b = a.clone();
let (tx, rx) = channel();
native::task::spawn(proc() {
for _ in range(0, 100000) {
loop {
- match unsafe { (*b.get()).pop() } {
+ match b.pop() {
Some(1) => break,
Some(_) => fail!(),
None => {}
tx.send(());
});
for _ in range(0, 100000) {
- unsafe { (*a.get()).push(1); }
+ a.push(1);
}
rx.recv();
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use alloc::arc::Arc;
+
use clone::Clone;
use kinds::Send;
-use sync::arc::UnsafeArc;
+use ty::Unsafe;
use unstable::mutex::NativeMutex;
struct ExData<T> {
* need to block or deschedule while accessing shared state, use extra::sync::RWArc.
*/
pub struct Exclusive<T> {
- x: UnsafeArc<ExData<T>>
+ x: Arc<Unsafe<ExData<T>>>
}
impl<T:Send> Clone for Exclusive<T> {
data: user_data
};
Exclusive {
- x: UnsafeArc::new(data)
+ x: Arc::new(Unsafe::new(data))
}
}
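`Exclusive` keeps its mutex-guarded interface; only the backing storage changes from `UnsafeArc` to `Arc<Unsafe<..>>`, with `Unsafe` supplying the interior mutability that `UnsafeArc` used to. A sketch of the pattern its users (such as the deque's `BufferPool`) rely on, assuming the `new`/`with` API shown and used elsewhere in this patch:

    let ex = Exclusive::new(Vec::<int>::new());
    unsafe {
        // `with` locks the NativeMutex and lends out the protected data.
        ex.with(|v| v.push(1));
    }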
InternedString::new("doc"),
token::intern_and_get_ident(strip_doc_comment_decoration(
comment.get()).as_slice()));
- mk_attr(meta)
+ if self.node.style == ast::AttrOuter {
+ mk_attr_outer(meta)
+ } else {
+ mk_attr_inner(meta)
+ }
} else {
*self
}
@dummy_spanned(MetaWord(name))
}
-pub fn mk_attr(item: @MetaItem) -> Attribute {
+/// Returns an inner attribute with the given value.
+pub fn mk_attr_inner(item: @MetaItem) -> Attribute {
dummy_spanned(Attribute_ {
style: ast::AttrInner,
value: item,
})
}
+/// Returns an outer attribute with the given value.
+pub fn mk_attr_outer(item: @MetaItem) -> Attribute {
+ dummy_spanned(Attribute_ {
+ style: ast::AttrOuter,
+ value: item,
+ is_sugared_doc: false,
+ })
+}
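For reference, the distinction the two constructors now encode: an inner attribute (`ast::AttrInner`) attaches to its enclosing item, while an outer attribute (`ast::AttrOuter`) attaches to the item that follows it:

    #![allow(dead_code)] // inner: note the `!`
    #[inline]            // outer
    fn f() {}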
+
pub fn mk_sugared_doc_attr(text: InternedString, lo: BytePos, hi: BytePos)
-> Attribute {
let style = doc_comment_style(text.get());
_ => {}
}
- if !is_ident_or_path(&self.token)
+ if (!is_ident_or_path(&self.token) && self.token != token::MOD_SEP)
|| self.is_keyword(keywords::True)
|| self.is_keyword(keywords::False) {
// Parse an expression pattern or exp .. exp.
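This is the parser half of accepting leading-`::` (crate-root) paths in patterns; the run-pass test appended at the end of this diff exercises it, for example:

    match Bar(4) {
        ::Bar(n) => assert_eq!(n, 4), // absolute path now legal in a pattern
        ::Baz => fail!(),
    }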
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let a = Some(box 1);
+ match a {
+ Ok(a) => //~ ERROR: mismatched types
+ println!("{}",a), //~ ERROR: failed to find an implementation of trait
+ None => fail!()
+ }
+}
+
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+enum IntList {
+ Cons(int, Box<IntList>),
+ Nil
+}
+
+fn tail(source_list: &IntList) -> IntList {
+ match source_list {
+ &Cons(val, box ref next_list) => tail(next_list),
+ &Cons(val, box Nil) => Cons(val, box Nil),
+ //~^ ERROR: unreachable pattern
+ _ => fail!()
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let sl = vec![1,2,3];
+ let v: int = match sl.as_slice() {
+ [] => 0,
+ [a,b,c] => 3,
+ [a, ..rest] => a,
+ [10,a, ..rest] => 10 //~ ERROR: unreachable pattern
+ };
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn match_vecs<'a, T>(l1: &'a [T], l2: &'a [T]) {
+ match (l1, l2) {
+ ([], []) => println!("both empty"),
+ ([], [hd, ..tl]) | ([hd, ..tl], []) => println!("one empty"),
+ //~^ ERROR: cannot move out of dereference
+ //~^^ ERROR: cannot move out of dereference
+ ([hd1, ..tl1], [hd2, ..tl2]) => println!("both nonempty"),
+ //~^ ERROR: cannot move out of dereference
+ //~^^ ERROR: cannot move out of dereference
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// error-pattern: missing `Self` type param in the substitution of `fn(Self)`
+
+trait Trait {
+ fn outer(self) {
+ fn inner(_: Self) {
+ }
+ }
+}
+
+fn main() { }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ box ( () ) 0;
+ //~^ ERROR: only the managed heap and exchange heap are currently supported
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn fn1(0: Box) {} //~ ERROR: not enough type parameters supplied to `Box<T>`
+
+fn main() {}
+
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+enum Whatever {
+}
+
+fn foo(x: Whatever) {
+ match x {
+ Some(field) => field.access(),
+ //~^ ERROR: mismatched types: expected `Whatever` but found
+ //~^^ ERROR: does not implement any method in scope named `access`
+ }
+}
+
+fn main(){}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(globs)]
+
+use self::*; //~ ERROR: unresolved import
+
+fn main() {
+}
+
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern "Rust" fn foo() {}
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern crate collections;
+extern crate std;
+
+use collections::Bitv;
+
+fn main() {
+ // Generate sieve of Eratosthenes for n up to 1e6
+ let n = 1000000u;
+ let sieve = Bitv::new(n+1, true);
+ let limit: uint = (n as f32).sqrt() as uint;
+ for i in range(2, limit+1) {
+ if sieve[i] {
+ let mut j = 0;
+ while i*i + j*i <= n {
+ sieve[i*i+j*i] = false;
+ j += 1;
+ }
+ }
+ }
+ for i in range(2, n+1) {
+ if sieve[i] {
+ }
+ }
+}
+
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+enum Foo {
+ Bar(int),
+ Baz,
+}
+
+enum Other {
+ Other1(Foo),
+ Other2(Foo, Foo),
+}
+
+fn main() {
+ match Baz {
+ ::Bar(3) => fail!(),
+ ::Bar(_) if false => fail!(),
+ ::Bar(..) if false => fail!(),
+ ::Bar(_n) => fail!(),
+ ::Baz => {}
+ }
+ match Bar(3) {
+ ::Bar(3) => {}
+ ::Bar(_) if false => fail!(),
+ ::Bar(..) if false => fail!(),
+ ::Bar(_n) => fail!(),
+ ::Baz => fail!(),
+ }
+ match Bar(4) {
+ ::Bar(3) => fail!(),
+ ::Bar(_) if false => fail!(),
+ ::Bar(..) if false => fail!(),
+ ::Bar(n) => assert_eq!(n, 4),
+ ::Baz => fail!(),
+ }
+
+ match Other1(Baz) {
+ ::Other1(::Baz) => {}
+ ::Other1(::Bar(_)) => {}
+ ::Other2(::Baz, ::Bar(_)) => {}
+ ::Other2(::Bar(..), ::Baz) => {}
+ ::Other2(..) => {}
+ }
+}