branch = master
[submodule "src/gyp"]
path = src/gyp
- url = https://git.chromium.org/external/gyp.git
+ url = https://github.com/brson/gyp.git
*/
impl<T:Freeze+Send> Arc<T> {
/// Create an atomically reference counted wrapper.
+ #[inline]
pub fn new(data: T) -> Arc<T> {
Arc { x: UnsafeArc::new(data) }
}
+ #[inline]
pub fn get<'a>(&'a self) -> &'a T {
unsafe { &*self.x.get_immut() }
}
* object. However, one of the `arc` objects can be sent to another task,
* allowing them to share the underlying data.
*/
+ #[inline]
fn clone(&self) -> Arc<T> {
Arc { x: self.x.clone() }
}
impl<T:Send> Clone for MutexArc<T> {
/// Duplicate a mutex-protected Arc. See arc::clone for more details.
+ #[inline]
fn clone(&self) -> MutexArc<T> {
// NB: Cloning the underlying mutex is not necessary. Its reference
// count would be exactly the same as the shared state's.
impl<T:Freeze + Send> Clone for RWArc<T> {
/// Duplicate a rwlock-protected Arc. See arc::clone for more details.
+ #[inline]
fn clone(&self) -> RWArc<T> {
RWArc { x: self.x.clone() }
}
#[cfg(test)]
mod test {
use comm::{DuplexStream, rendezvous};
- use std::rt::test::run_in_newsched_task;
+ use std::rt::test::run_in_uv_task;
use std::task::spawn_unlinked;
#[test]
fn recv_a_lot() {
// Rendezvous streams should be able to handle any number of messages being sent
- do run_in_newsched_task {
+ do run_in_uv_task {
let (port, chan) = rendezvous();
do spawn {
do 1000000.times { chan.send(()) }
// make sure that the thing we are pointing at stays valid
// for the lifetime `scope_r` of the resulting ptr:
- let scope_r = ty_region(tcx, ex.span, ty::expr_ty(tcx, ex));
- this.guarantee_valid(ex.id,
- ex.span,
- base_cmt,
- LoanMutability::from_ast_mutability(mutbl),
- scope_r);
+ let expr_ty = ty::expr_ty(tcx, ex);
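+ // A `bot`-typed expression (e.g. `&fail!()`) never produces a value,
+ // so there is no data whose validity needs guaranteeing (and no
+ // meaningful region to extract from its type).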
+ if !ty::type_is_bot(expr_ty) {
+ let scope_r = ty_region(tcx, ex.span, expr_ty);
+ this.guarantee_valid(ex.id,
+ ex.span,
+ base_cmt,
+ LoanMutability::from_ast_mutability(mutbl),
+ scope_r);
+ }
visit::walk_expr(this, ex, ());
}
return c;
}
}
-pub fn umax(cx: @mut Block, a: ValueRef, b: ValueRef) -> ValueRef {
- let _icx = push_ctxt("umax");
- let cond = ICmp(cx, lib::llvm::IntULT, a, b);
- return Select(cx, cond, b, a);
-}
-
-pub fn umin(cx: @mut Block, a: ValueRef, b: ValueRef) -> ValueRef {
- let _icx = push_ctxt("umin");
- let cond = ICmp(cx, lib::llvm::IntULT, a, b);
- return Select(cx, cond, a, b);
-}
-
-// Given a pointer p, returns a pointer sz(p) (i.e., inc'd by sz bytes).
-// The type of the returned pointer is always i8*. If you care about the
-// return type, use bump_ptr().
-pub fn ptr_offs(bcx: @mut Block, base: ValueRef, sz: ValueRef) -> ValueRef {
- let _icx = push_ctxt("ptr_offs");
- let raw = PointerCast(bcx, base, Type::i8p());
- InBoundsGEP(bcx, raw, [sz])
-}
-
-// Increment a pointer by a given amount and then cast it to be a pointer
-// to a given type.
-pub fn bump_ptr(bcx: @mut Block, t: ty::t, base: ValueRef, sz: ValueRef) ->
- ValueRef {
- let _icx = push_ctxt("bump_ptr");
- let ccx = bcx.ccx();
- let bumped = ptr_offs(bcx, base, sz);
- let typ = type_of(ccx, t).ptr_to();
- PointerCast(bcx, bumped, typ)
-}
// Returns a pointer to the body for the box. The box may be an opaque
// box. The result will be cast to the type of body_t, if it is statically
assert!(heap != heap_exchange);
malloc_general_dyn(bcx, t, heap, llsize_of(bcx.ccx(), ty))
}
-pub fn malloc_boxed(bcx: @mut Block, t: ty::t)
- -> MallocResult {
- malloc_general(bcx, t, heap_managed)
-}
pub fn heap_for_unique(bcx: @mut Block, t: ty::t) -> heap {
if ty::type_contents(bcx.tcx(), t).contains_managed() {
}
}
-pub fn set_glue_inlining(f: ValueRef, t: ty::t) {
- if ty::type_is_structural(t) {
- set_optimize_for_size(f);
- } else { set_always_inline(f); }
-}
-
// Double-check that we never ask LLVM to declare the same symbol twice. It
// silently mangles such symbols, breaking our linkage model.
pub fn note_unique_llvm_symbol(ccx: &mut CrateContext, sym: @str) {
debug!("{} is for type {}", fn_nm, ppaux::ty_to_str(ccx.tcx, t));
note_unique_llvm_symbol(ccx, fn_nm);
let llfn = decl_cdecl_fn(ccx.llmod, fn_nm, llfnty);
- set_glue_inlining(llfn, t);
return llfn;
}
}
}
+impl<T> Clone for *mut T {
+ #[inline]
+ fn clone(&self) -> *mut T {
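+ // Raw pointers are plain machine words; cloning one just copies the
+ // address and implies nothing about ownership.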
+ *self
+ }
+}
+
/// Return the first offset `i` such that `f(buf[i]) == true`.
#[inline]
pub unsafe fn position<T>(buf: *T, f: &fn(&T) -> bool) -> uint {
--- /dev/null
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! This is a basic event loop implementation not meant for any "real purposes"
+//! other than testing the scheduler and proving that it's possible to have a
+//! pluggable event loop.
+
+use prelude::*;
+
+use cast;
+use rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausibleIdleCallback};
+use unstable::sync::Exclusive;
+use util;
+
+/// This is the only exported function from this module.
+pub fn event_loop() -> ~EventLoop {
+ ~BasicLoop::new() as ~EventLoop
+}
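+
+// A minimal usage sketch (hypothetical driver code, not part of this module):
+// enqueue a callback and run the loop to completion. `run` returns once no
+// pending work, active idle callback, or live remote callbacks remain.
+//
+//     let mut el = event_loop();
+//     el.callback(|| rtdebug!("ran on the basic event loop"));
+//     el.run();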
+
+struct BasicLoop {
+ work: ~[~fn()], // pending work
+ idle: Option<*BasicPausible>, // only one is allowed
+ remotes: ~[(uint, ~fn())],
+ next_remote: uint,
+ messages: Exclusive<~[Message]>
+}
+
+enum Message { RunRemote(uint), RemoveRemote(uint) }
+
+struct Time {
+ sec: u64,
+ nsec: u64,
+}
+
+impl Ord for Time {
+ fn lt(&self, other: &Time) -> bool {
+ // Seconds dominate the comparison; nanoseconds only break ties.
+ self.sec < other.sec || (self.sec == other.sec && self.nsec < other.nsec)
+ }
+}
+
+impl BasicLoop {
+ fn new() -> BasicLoop {
+ BasicLoop {
+ work: ~[],
+ idle: None,
+ next_remote: 0,
+ remotes: ~[],
+ messages: Exclusive::new(~[]),
+ }
+ }
+
+ /// Process everything in the work queue (continually)
+ fn work(&mut self) {
+ while self.work.len() > 0 {
+ for work in util::replace(&mut self.work, ~[]).move_iter() {
+ work();
+ }
+ }
+ }
+
+ fn remote_work(&mut self) {
+ let messages = unsafe {
+ do self.messages.with |messages| {
+ if messages.len() > 0 {
+ Some(util::replace(messages, ~[]))
+ } else {
+ None
+ }
+ }
+ };
+ let messages = match messages {
+ Some(m) => m, None => return
+ };
+ for message in messages.iter() {
+ self.message(*message);
+ }
+ }
+
+ fn message(&mut self, message: Message) {
+ match message {
+ RunRemote(i) => {
+ match self.remotes.iter().find(|& &(id, _)| id == i) {
+ Some(&(_, ref f)) => (*f)(),
+ None => unreachable!()
+ }
+ }
+ RemoveRemote(i) => {
+ match self.remotes.iter().position(|&(id, _)| id == i) {
+ Some(i) => { self.remotes.remove(i); }
+ None => unreachable!()
+ }
+ }
+ }
+ }
+
+ /// Run the idle callback if one is registered
+ fn idle(&mut self) {
+ unsafe {
+ match self.idle {
+ Some(idle) => {
+ if (*idle).active {
+ (*(*idle).work.get_ref())();
+ }
+ }
+ None => {}
+ }
+ }
+ }
+
+ fn has_idle(&self) -> bool {
+ unsafe { self.idle.is_some() && (**self.idle.get_ref()).active }
+ }
+}
+
+impl EventLoop for BasicLoop {
+ fn run(&mut self) {
+ // Not exactly efficient, but it gets the job done.
+ while self.remotes.len() > 0 || self.work.len() > 0 || self.has_idle() {
+
+ self.work();
+ self.remote_work();
+
+ if self.has_idle() {
+ self.idle();
+ continue
+ }
+
+ unsafe {
+ // We block here if we have no messages to process and we may
+ // receive a message at a later date
+ do self.messages.hold_and_wait |messages| {
+ self.remotes.len() > 0 &&
+ messages.len() == 0 &&
+ self.work.len() == 0
+ }
+ }
+ }
+ }
+
+ fn callback(&mut self, f: ~fn()) {
+ self.work.push(f);
+ }
+
+ // XXX: Seems like a really weird requirement to have an event loop provide.
+ fn pausible_idle_callback(&mut self) -> ~PausibleIdleCallback {
+ let callback = ~BasicPausible::new(self);
+ rtassert!(self.idle.is_none());
+ unsafe {
+ let cb_ptr: &*BasicPausible = cast::transmute(&callback);
+ self.idle = Some(*cb_ptr);
+ }
+ return callback as ~PausibleIdleCallback;
+ }
+
+ fn remote_callback(&mut self, f: ~fn()) -> ~RemoteCallback {
+ let id = self.next_remote;
+ self.next_remote += 1;
+ self.remotes.push((id, f));
+ ~BasicRemote::new(self.messages.clone(), id) as ~RemoteCallback
+ }
+
+ /// This has no bindings for local I/O
+ fn io<'a>(&'a mut self, _: &fn(&'a mut IoFactory)) {}
+}
+
+struct BasicRemote {
+ queue: Exclusive<~[Message]>,
+ id: uint,
+}
+
+impl BasicRemote {
+ fn new(queue: Exclusive<~[Message]>, id: uint) -> BasicRemote {
+ BasicRemote { queue: queue, id: id }
+ }
+}
+
+impl RemoteCallback for BasicRemote {
+ fn fire(&mut self) {
+ unsafe {
+ do self.queue.hold_and_signal |queue| {
+ queue.push(RunRemote(self.id));
+ }
+ }
+ }
+}
+
+impl Drop for BasicRemote {
+ fn drop(&mut self) {
+ unsafe {
+ do self.queue.hold_and_signal |queue| {
+ queue.push(RemoveRemote(self.id));
+ }
+ }
+ }
+}
+
+struct BasicPausible {
+ eloop: *mut BasicLoop,
+ work: Option<~fn()>,
+ active: bool,
+}
+
+impl BasicPausible {
+ fn new(eloop: &mut BasicLoop) -> BasicPausible {
+ BasicPausible {
+ active: false,
+ work: None,
+ eloop: eloop,
+ }
+ }
+}
+
+impl PausibleIdleCallback for BasicPausible {
+ fn start(&mut self, f: ~fn()) {
+ rtassert!(!self.active && self.work.is_none());
+ self.active = true;
+ self.work = Some(f);
+ }
+ fn pause(&mut self) {
+ self.active = false;
+ }
+ fn resume(&mut self) {
+ self.active = true;
+ }
+ fn close(&mut self) {
+ self.active = false;
+ self.work = None;
+ }
+}
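+
+// Lifecycle sketch (assuming a scheduler drives this, as `run` above does):
+// `start` registers the idle closure, `pause`/`resume` gate whether `run`
+// keeps invoking it, and `close` drops the closure before the handle dies.
+//
+//     let mut idle = eloop.pausible_idle_callback(); // `eloop` assumed
+//     do idle.start { rtdebug!("idle work"); }
+//     idle.pause();
+//     idle.resume();
+//     idle.close();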
+
+impl Drop for BasicPausible {
+ fn drop(&mut self) {
+ unsafe {
+ (*self.eloop).idle = None;
+ }
+ }
+}
+
+fn time() -> Time {
+ #[fixed_stack_segment]; #[inline(never)];
+ extern {
+ fn get_time(sec: &mut i64, nsec: &mut i32);
+ }
+ let mut sec = 0;
+ let mut nsec = 0;
+ unsafe { get_time(&mut sec, &mut nsec) }
+
+ Time { sec: sec as u64, nsec: nsec as u64 }
+}
}
impl<W: Writer> Decorator<W> for BufferedWriter<W> {
- fn inner(self) -> W {
- self.inner
- }
+ fn inner(self) -> W { self.inner }
+ fn inner_ref<'a>(&'a self) -> &'a W { &self.inner }
+ fn inner_mut_ref<'a>(&'a mut self) -> &'a mut W { &mut self.inner }
+}
- fn inner_ref<'a>(&'a self) -> &'a W {
- &self.inner
+/// Wraps a Writer and buffers output to it, flushing whenever a newline (0xa,
+/// '\n') is detected.
+///
+/// Note that this structure does NOT flush the output when dropped.
+pub struct LineBufferedWriter<W> {
+ priv inner: BufferedWriter<W>,
+}
+
+impl<W: Writer> LineBufferedWriter<W> {
+ /// Creates a new `LineBufferedWriter`
+ pub fn new(inner: W) -> LineBufferedWriter<W> {
+ // Lines typically aren't that long, don't use a giant buffer
+ LineBufferedWriter {
+ inner: BufferedWriter::with_capacity(1024, inner)
+ }
}
+}
- fn inner_mut_ref<'a>(&'a mut self) -> &'a mut W {
- &mut self.inner
+impl<W: Writer> Writer for LineBufferedWriter<W> {
+ fn write(&mut self, buf: &[u8]) {
+ // Split at the *last* newline so every complete line in this write is
+ // flushed; anything after it stays buffered until the next newline.
+ match buf.iter().rposition(|&b| b == '\n' as u8) {
+ Some(i) => {
+ self.inner.write(buf.slice_to(i + 1));
+ self.inner.flush();
+ self.inner.write(buf.slice_from(i + 1));
+ }
+ None => self.inner.write(buf),
+ }
}
+
+ fn flush(&mut self) { self.inner.flush() }
+}
+
+impl<W: Writer> Decorator<W> for LineBufferedWriter<W> {
+ fn inner(self) -> W { self.inner.inner() }
+ fn inner_ref<'a>(&'a self) -> &'a W { self.inner.inner_ref() }
+ fn inner_mut_ref<'a>(&'a mut self) -> &'a mut W { self.inner.inner_mut_ref() }
}
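+
+// Usage sketch (MemWriter assumed in scope, as in the tests below): bytes
+// accumulate until a newline arrives, at which point everything through that
+// newline is flushed to the wrapped writer.
+//
+//     let mut w = LineBufferedWriter::new(MemWriter::new());
+//     w.write("partial".as_bytes());      // buffered; no newline yet
+//     w.write(" line\n tail".as_bytes()); // flushes through the '\n'
+//     assert_eq!(*w.inner_ref().inner_ref(),
+//                "partial line\n".as_bytes().to_owned());
+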
struct InternalBufferedWriter<W>(BufferedWriter<W>);
assert_eq!(reader.read_until(8), Some(~[0]));
assert_eq!(reader.read_until(9), None);
}
+
+ #[test]
+ fn test_line_buffer() {
+ let mut writer = LineBufferedWriter::new(MemWriter::new());
+ writer.write([0]);
+ assert_eq!(*writer.inner_ref().inner_ref(), ~[]);
+ writer.write([1]);
+ assert_eq!(*writer.inner_ref().inner_ref(), ~[]);
+ writer.flush();
+ assert_eq!(*writer.inner_ref().inner_ref(), ~[0, 1]);
+ writer.write([0, '\n' as u8, 1]);
+ assert_eq!(*writer.inner_ref().inner_ref(), ~[0, 1, 0, '\n' as u8]);
+ writer.flush();
+ assert_eq!(*writer.inner_ref().inner_ref(), ~[0, 1, 0, '\n' as u8, 1]);
+ }
}
detail: None
}
}
+ IoUnavailable => {
+ IoError {
+ kind: IoUnavailable,
+ desc: "I/O is unavailable",
+ detail: None
+ }
+ }
_ => fail!()
}
}
use libc;
use option::{Option, Some, None};
use result::{Ok, Err};
+use rt::io::buffered::{LineBufferedWriter, BufferedWriter};
use rt::rtio::{IoFactory, RtioTTY, RtioFileStream, with_local_io,
CloseAsynchronously};
use super::{Reader, Writer, io_error, IoError, OtherIoError};
do src(libc::STDERR_FILENO, false) |src| { StdWriter { inner: src } }
}
+/// Executes a closure with the local task's handle on stdout. By default, this
+/// stream is a buffering stream, so the handle yielded to the given closure
+/// can be used to flush the stdout stream (if necessary). The buffering used
+/// is line-buffering when stdout is attached to a terminal, and a fixed-size
+/// buffer if it is not attached to a terminal.
+///
+/// Note that handles generated via the `stdout()` function all output to the
+/// same stream, and output among all tasks may be interleaved as a result of
+/// this. This is provided to give access to the default stream used by
+/// `print` and `println` (and the related macros) for this task.
+///
+/// Also note that logging macros do not use this stream. Using the logging
+/// macros will emit output to stderr.
+pub fn with_task_stdout(f: &fn(&mut Writer)) {
+ use rt::local::Local;
+ use rt::task::Task;
+
+ unsafe {
+ // Logging may require scheduling operations, so we can't remove the
+ // task from TLS right now, hence the unsafe borrow. Sad.
+ let task: *mut Task = Local::unsafe_borrow();
+
+ match (*task).stdout_handle {
+ Some(ref mut handle) => f(*handle),
+ None => {
+ let handle = stdout();
+ let mut handle = if handle.isatty() {
+ ~LineBufferedWriter::new(handle) as ~Writer
+ } else {
+ // The default capacity is very large, 64k, but this is just
+ // a stdout stream, and possibly per task, so let's not make
+ // this too expensive.
+ ~BufferedWriter::with_capacity(4096, handle) as ~Writer
+ };
+ f(handle);
+ (*task).stdout_handle = Some(handle);
+ }
+ }
+ }
+}
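+
+// For example (a sketch; `print` and `println` below are implemented exactly
+// this way): any code can borrow the task's shared, buffered stdout handle.
+//
+//     do with_task_stdout |io| {
+//         io.write("hello world\n".as_bytes());
+//     }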
+
/// Prints a string to the stdout of the current process. No newline is emitted
/// after the string is printed.
pub fn print(s: &str) {
- // XXX: need to see if not caching stdin() is the cause of performance
- // issues, it should be possible to cache a stdout handle in each Task
- // and then re-use that across calls to print/println. Note that the
- // resolution of this comment will affect all of the prints below as
- // well.
- stdout().write(s.as_bytes());
+ do with_task_stdout |io| {
+ io.write(s.as_bytes());
+ }
}
/// Prints a string as a line to the stdout of the current process. A literal
/// `\n` character is printed to the console after the string.
pub fn println(s: &str) {
- let mut out = stdout();
- out.write(s.as_bytes());
- out.write(['\n' as u8]);
+ do with_task_stdout |io| {
+ io.write(s.as_bytes());
+ io.write(['\n' as u8]);
+ }
}
/// Similar to `print`, but takes a `fmt::Arguments` structure to be compatible
/// with the `format_args!` macro.
pub fn print_args(fmt: &fmt::Arguments) {
- let mut out = stdout();
- fmt::write(&mut out as &mut Writer, fmt);
+ do with_task_stdout |io| {
+ fmt::write(io, fmt);
+ }
}
/// Similar to `println`, but takes a `fmt::Arguments` structure to be
/// compatible with the `format_args!` macro.
pub fn println_args(fmt: &fmt::Arguments) {
- let mut out = stdout();
- fmt::writeln(&mut out as &mut Writer, fmt);
+ do with_task_stdout |io| {
+ fmt::writeln(io, fmt);
+ }
}
/// Representation of a reader of a standard input stream
use libc::exit;
use option::{Some, None, Option};
use rt::io;
+use rt::io::stdio::StdWriter;
+use rt::io::buffered::LineBufferedWriter;
use rt::crate_map::{ModEntry, CrateMap, iter_crate_map, get_crate_map};
use str::StrSlice;
use u32;
/// This logger emits output to the stderr of the process, and contains a lazily
/// initialized event-loop driven handle to the stream.
pub struct StdErrLogger {
- priv handle: Option<io::stdio::StdWriter>,
+ priv handle: Option<LineBufferedWriter<StdWriter>>,
}
impl StdErrLogger {
fn log(&mut self, args: &fmt::Arguments) {
// First time logging? Get a handle to the stderr of this process.
if self.handle.is_none() {
- self.handle = Some(io::stderr());
+ self.handle = Some(LineBufferedWriter::new(io::stderr()));
}
fmt::writeln(self.handle.get_mut_ref() as &mut io::Writer, args);
}
// Internal macros used by the runtime.
mod macros;
+/// Basic implementation of an EventLoop, provides no I/O interfaces
+mod basic;
+
/// The global (exchange) heap.
pub mod global_heap;
fn run(&mut self);
fn callback(&mut self, ~fn());
fn pausible_idle_callback(&mut self) -> ~PausibleIdleCallback;
- fn callback_ms(&mut self, ms: u64, ~fn());
fn remote_callback(&mut self, ~fn()) -> ~RemoteCallback;
/// The asynchronous I/O services. Not all event loops may provide one
/// no longer try to go to sleep, but exit instead.
no_sleep: bool,
stack_pool: StackPool,
- /// The event loop used to drive the scheduler and perform I/O
- event_loop: ~EventLoop,
/// The scheduler runs on a special task. When it is not running
/// it is stored here instead of the work queue.
priv sched_task: Option<~Task>,
priv yield_check_count: uint,
/// A flag to tell the scheduler loop it needs to do some stealing
/// in order to introduce randomness as part of a yield
- priv steal_for_yield: bool
+ priv steal_for_yield: bool,
+
+ // n.b. Destructors of an object's fields currently run top-to-bottom in
+ // order of field declaration. By its nature, the pausible idle callback
+ // must hold some sort of handle to the event loop, so it needs to get
+ // destroyed before the event loop itself. For this reason we declare
+ // the event loop last, ensuring that any unsafe references to it are
+ // destroyed before the loop itself is.
+
+ /// The event loop used to drive the scheduler and perform I/O
+ event_loop: ~EventLoop,
}
/// An indication of how hard to work on a given operation, the difference
use cell::Cell;
use rt::thread::Thread;
use rt::task::{Task, Sched};
- use rt::rtio::EventLoop;
+ use rt::basic;
use rt::util;
use option::{Some};
#[test]
fn test_schedule_home_states() {
- use rt::uv::uvio::UvEventLoop;
use rt::sleeper_list::SleeperList;
use rt::work_queue::WorkQueue;
use rt::sched::Shutdown;
// Our normal scheduler
let mut normal_sched = ~Scheduler::new(
- ~UvEventLoop::new() as ~EventLoop,
+ basic::event_loop(),
normal_queue,
queues.clone(),
sleepers.clone());
// Our special scheduler
let mut special_sched = ~Scheduler::new_special(
- ~UvEventLoop::new() as ~EventLoop,
+ basic::event_loop(),
special_queue.clone(),
queues.clone(),
sleepers.clone(),
#[test]
fn test_io_callback() {
+ use rt::io::timer;
+
// This is a regression test that when there are no schedulable tasks
// in the work queue, but we are performing I/O, that once we do put
// something in the work queue again the scheduler picks it up and doesn't
// exit before emptying the work queue
- do run_in_newsched_task {
+ do run_in_uv_task {
do spawntask {
- let sched: ~Scheduler = Local::take();
- do sched.deschedule_running_task_and_then |sched, task| {
- let task = Cell::new(task);
- do sched.event_loop.callback_ms(10) {
- rtdebug!("in callback");
- let mut sched: ~Scheduler = Local::take();
- sched.enqueue_blocked_task(task.take());
- Local::put(sched);
- }
- }
+ timer::sleep(10);
}
}
}
use rt::work_queue::WorkQueue;
use rt::sleeper_list::SleeperList;
use rt::stack::StackPool;
- use rt::uv::uvio::UvEventLoop;
use rt::sched::{Shutdown, TaskFromFriend};
use util;
let queues = ~[queue.clone()];
let mut sched = ~Scheduler::new(
- ~UvEventLoop::new() as ~EventLoop,
+ basic::event_loop(),
queue,
queues.clone(),
sleepers.clone());
use rt::borrowck;
use rt::borrowck::BorrowRecord;
use rt::env;
+use rt::io::Writer;
use rt::kill::Death;
use rt::local::Local;
use rt::logging::StdErrLogger;
sched: Option<~Scheduler>,
task_type: TaskType,
// Dynamic borrowck debugging info
- borrow_list: Option<~[BorrowRecord]>
+ borrow_list: Option<~[BorrowRecord]>,
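+ // Lazily-initialized, buffered stdout handle; created by
+ // `with_task_stdout` and flushed when the task finishes.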
+ stdout_handle: Option<~Writer>,
}
pub enum TaskType {
name: None,
sched: None,
task_type: SchedTask,
- borrow_list: None
+ borrow_list: None,
+ stdout_handle: None,
}
}
coroutine: Some(Coroutine::new(stack_pool, stack_size, start)),
sched: None,
task_type: GreenTask(Some(home)),
- borrow_list: None
+ borrow_list: None,
+ stdout_handle: None,
}
}
coroutine: Some(Coroutine::new(stack_pool, stack_size, start)),
sched: None,
task_type: GreenTask(Some(home)),
- borrow_list: None
+ borrow_list: None,
+ stdout_handle: None,
}
}
// Run the task main function, then do some cleanup.
do f.finally {
+
// First, destroy task-local storage. This may run user dtors.
//
// FIXME #8302: Dear diary. I'm so tired and confused.
// Destroy remaining boxes. Also may run user dtors.
unsafe { cleanup::annihilate(); }
+
+ // Finally flush and destroy any output handles which the task
+ // owns. There are no boxes here, and no user destructors should
+ // run after this any more.
+ match self.stdout_handle.take() {
+ Some(handle) => {
+ let mut handle = handle;
+ handle.flush();
+ }
+ None => {}
+ }
}
}
impl Drop for Task {
fn drop(&mut self) {
rtdebug!("called drop for a task: {}", borrow::to_uint(self));
- rtassert!(self.destroyed)
+ rtassert!(self.destroyed);
}
}
#[test]
fn rng() {
- do run_in_newsched_task() {
+ do run_in_uv_task() {
use rand::{rng, Rng};
let mut r = rng();
let _ = r.next_u32();
#[test]
fn logging() {
- do run_in_newsched_task() {
+ do run_in_uv_task() {
info!("here i am. logging in a newsched task");
}
}
use super::io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr};
use vec::{OwnedVector, MutableVector, ImmutableVector};
use path::GenericPath;
+use rt::basic;
use rt::sched::Scheduler;
use rt::rtio::EventLoop;
use unstable::{run_in_bare_thread};
}
+pub fn new_test_sched() -> Scheduler {
+
+ let queue = WorkQueue::new();
+ let queues = ~[queue.clone()];
+
+ let mut sched = Scheduler::new(basic::event_loop(),
+ queue,
+ queues,
+ SleeperList::new());
+
+ // Don't wait for the Shutdown message
+ sched.no_sleep = true;
+ return sched;
+}
+
+pub fn run_in_uv_task(f: ~fn()) {
+ let f = Cell::new(f);
+ do run_in_bare_thread {
+ run_in_uv_task_core(f.take());
+ }
+}
+
pub fn run_in_newsched_task(f: ~fn()) {
let f = Cell::new(f);
do run_in_bare_thread {
}
}
-pub fn run_in_newsched_task_core(f: ~fn()) {
+pub fn run_in_uv_task_core(f: ~fn()) {
use rt::sched::Shutdown;
sched.bootstrap(task);
}
+pub fn run_in_newsched_task_core(f: ~fn()) {
+
+ use rt::sched::Shutdown;
+
+ let mut sched = ~new_test_sched();
+ let exit_handle = Cell::new(sched.make_handle());
+
+ let on_exit: ~fn(bool) = |exit_status| {
+ exit_handle.take().send(Shutdown);
+ rtassert!(exit_status);
+ };
+ let mut task = ~Task::new_root(&mut sched.stack_pool, None, f);
+ task.death.on_exit = Some(on_exit);
+
+ sched.bootstrap(task);
+}
+
#[cfg(target_os="macos")]
#[allow(non_camel_case_types)]
mod darwin_fd_limit {
/// Get a ~Task for testing purposes other than actually scheduling it.
pub fn with_test_task(blk: ~fn(~Task) -> ~Task) {
do run_in_bare_thread {
- let mut sched = ~new_test_uv_sched();
+ let mut sched = ~new_test_sched();
let task = blk(~Task::new_root(&mut sched.stack_pool, None, ||{}));
cleanup_task(task);
}
} as ~PausibleIdleCallback
}
- fn callback_ms(&mut self, ms: u64, f: ~fn()) {
- let mut timer = TimerWatcher::new(self.uvio.uv_loop());
- do timer.start(ms, 0) |timer, status| {
- assert!(status.is_none());
- timer.close(||());
- f();
- }
- }
-
fn remote_callback(&mut self, f: ~fn()) -> ~RemoteCallback {
~UvRemoteCallback::new(self.uvio.uv_loop(), f) as ~RemoteCallback
}
}
fn isatty(&self) -> bool {
- unsafe { uvll::guess_handle(self.fd) == uvll::UV_TTY }
+ unsafe { uvll::guess_handle(self.fd) == uvll::UV_TTY as c_int }
}
}
#[fixed_stack_segment]; #[inline(never)];
rust_uv_tty_get_winsize(tty, width, height)
}
-pub unsafe fn guess_handle(fd: c_int) -> uv_handle_type {
+// FIXME(#9613) this should return uv_handle_type, not a c_int
+pub unsafe fn guess_handle(fd: c_int) -> c_int {
#[fixed_stack_segment]; #[inline(never)];
rust_uv_guess_handle(fd)
}
fn rust_uv_tty_set_mode(tty: *uv_tty_t, mode: c_int) -> c_int;
fn rust_uv_tty_get_winsize(tty: *uv_tty_t, width: *c_int,
height: *c_int) -> c_int;
- fn rust_uv_guess_handle(fd: c_int) -> uv_handle_type;
+ fn rust_uv_guess_handle(fd: c_int) -> c_int;
// XXX: see comments in addrinfo.rs
// These should all really be constants...
#[test]
fn select_one() {
- do run_in_newsched_task { select_helper(1, [0]) }
+ do run_in_uv_task { select_helper(1, [0]) }
}
#[test]
// NB. I would like to have a test that tests the first one that is
// ready is the one that's returned, but that can't be reliably tested
// with the randomized behaviour of optimistic_check.
- do run_in_newsched_task { select_helper(2, [1]) }
- do run_in_newsched_task { select_helper(2, [0]) }
- do run_in_newsched_task { select_helper(2, [1,0]) }
+ do run_in_uv_task { select_helper(2, [1]) }
+ do run_in_uv_task { select_helper(2, [0]) }
+ do run_in_uv_task { select_helper(2, [1,0]) }
}
#[test]
fn select_a_lot() {
- do run_in_newsched_task { select_helper(12, [7,8,9]) }
+ do run_in_uv_task { select_helper(12, [7,8,9]) }
}
#[test]
// Sends 10 buffered packets, and uses select to retrieve them all.
// Puts the port in a different spot in the vector each time.
- do run_in_newsched_task {
+ do run_in_uv_task {
let (ports, _) = unzip(range(0u, 10).map(|_| stream::<int>()));
let (port, chan) = stream();
do 10.times { chan.send(31337); }
#[test]
fn select_unkillable() {
- do run_in_newsched_task {
+ do run_in_uv_task {
do task::unkillable { select_helper(2, [1]) }
}
}
select_blocking_helper(false);
fn select_blocking_helper(killable: bool) {
- do run_in_newsched_task {
+ do run_in_uv_task {
let (p1,_c) = oneshot();
let (p2,c2) = oneshot();
let mut ports = [p1,p2];
fn select_racing_senders_helper(killable: bool, send_on_chans: ~[uint]) {
use rt::test::spawntask_random;
- do run_in_newsched_task {
+ do run_in_uv_task {
// A bit of stress, since ordinarily this is just smoke and mirrors.
do 4.times {
let send_on_chans = send_on_chans.clone();
#[test]
fn select_killed() {
- do run_in_newsched_task {
+ do run_in_uv_task {
let (success_p, success_c) = oneshot::<bool>();
let success_c = Cell::new(success_c);
do task::try {
// CPU, *after* the spawner is already switched-back-to (and passes the
// killed check at the start of its timeslice). As far as I know, it's not
// possible to make this race deterministic, or even more likely to happen.
- do run_in_newsched_task {
+ do run_in_uv_task {
do task::try {
do task::spawn {
fail!();
// Tests that when a kill signal is received, 'rekillable' and
// 'unkillable' unwind correctly in conjunction with each other.
- do run_in_newsched_task {
+ do run_in_uv_task {
do task::try {
do task::unkillable {
do task::rekillable {
#[ignore(reason = "linked failure")]
#[test]
fn test_spawn_unlinked_unsup_no_fail_down() { // grandchild sends on a port
- use rt::test::run_in_newsched_task;
- do run_in_newsched_task {
+ use rt::test::run_in_uv_task;
+ do run_in_uv_task {
let (po, ch) = stream();
let ch = SharedChan::new(ch);
do spawn_unlinked {
#[ignore(reason = "linked failure")]
#[test]
fn test_spawn_unlinked_unsup_no_fail_up() { // child unlinked fails
- use rt::test::run_in_newsched_task;
- do run_in_newsched_task {
+ use rt::test::run_in_uv_task;
+ do run_in_uv_task {
do spawn_unlinked { fail!(); }
}
}
#[ignore(reason = "linked failure")]
#[test]
fn test_spawn_unlinked_sup_no_fail_up() { // child unlinked fails
- use rt::test::run_in_newsched_task;
- do run_in_newsched_task {
+ use rt::test::run_in_uv_task;
+ do run_in_uv_task {
do spawn_supervised { fail!(); }
// Give child a chance to fail-but-not-kill-us.
do 16.times { task::deschedule(); }
#[ignore(reason = "linked failure")]
#[test]
fn test_spawn_unlinked_sup_fail_down() {
- use rt::test::run_in_newsched_task;
- do run_in_newsched_task {
+ use rt::test::run_in_uv_task;
+ do run_in_uv_task {
let result: Result<(),()> = do try {
do spawn_supervised { block_forever(); }
fail!(); // Shouldn't leave a child hanging around.
#[ignore(reason = "linked failure")]
#[test]
fn test_spawn_linked_sup_fail_up() { // child fails; parent fails
- use rt::test::run_in_newsched_task;
- do run_in_newsched_task {
+ use rt::test::run_in_uv_task;
+ do run_in_uv_task {
let result: Result<(),()> = do try {
// Unidirectional "parenting" shouldn't override bidirectional linked.
// We have to cheat with opts - the interface doesn't support them because
#[ignore(reason = "linked failure")]
#[test]
fn test_spawn_linked_sup_fail_down() { // parent fails; child fails
- use rt::test::run_in_newsched_task;
- do run_in_newsched_task {
+ use rt::test::run_in_uv_task;
+ do run_in_uv_task {
let result: Result<(),()> = do try {
// We have to cheat with opts - the interface doesn't support them because
// they don't make sense (redundant with task().supervised()).
#[ignore(reason = "linked failure")]
#[test]
fn test_spawn_linked_unsup_fail_up() { // child fails; parent fails
- use rt::test::run_in_newsched_task;
- do run_in_newsched_task {
+ use rt::test::run_in_uv_task;
+ do run_in_uv_task {
let result: Result<(),()> = do try {
// Default options are to spawn linked & unsupervised.
do spawn { fail!(); }
#[ignore(reason = "linked failure")]
#[test]
fn test_spawn_linked_unsup_fail_down() { // parent fails; child fails
- use rt::test::run_in_newsched_task;
- do run_in_newsched_task {
+ use rt::test::run_in_uv_task;
+ do run_in_uv_task {
let result: Result<(),()> = do try {
// Default options are to spawn linked & unsupervised.
do spawn { block_forever(); }
#[ignore(reason = "linked failure")]
#[test]
fn test_spawn_linked_unsup_default_opts() { // parent fails; child fails
- use rt::test::run_in_newsched_task;
- do run_in_newsched_task {
+ use rt::test::run_in_uv_task;
+ do run_in_uv_task {
let result: Result<(),()> = do try {
// Make sure the above test is the same as this one.
let mut builder = task();
#[ignore(reason = "linked failure")]
#[test]
fn test_spawn_failure_propagate_grandchild() {
- use rt::test::run_in_newsched_task;
- do run_in_newsched_task {
+ use rt::test::run_in_uv_task;
+ do run_in_uv_task {
let result: Result<(),()> = do try {
// Middle task exits; does grandparent's failure propagate across the gap?
do spawn_supervised {
#[ignore(reason = "linked failure")]
#[test]
fn test_spawn_failure_propagate_secondborn() {
- use rt::test::run_in_newsched_task;
- do run_in_newsched_task {
+ use rt::test::run_in_uv_task;
+ do run_in_uv_task {
let result: Result<(),()> = do try {
// First-born child exits; does parent's failure propagate to sibling?
do spawn_supervised {
#[ignore(reason = "linked failure")]
#[test]
fn test_spawn_failure_propagate_nephew_or_niece() {
- use rt::test::run_in_newsched_task;
- do run_in_newsched_task {
+ use rt::test::run_in_uv_task;
+ do run_in_uv_task {
let result: Result<(),()> = do try {
// Our sibling exits; does our failure propagate to sibling's child?
do spawn { // linked
#[ignore(reason = "linked failure")]
#[test]
fn test_spawn_linked_sup_propagate_sibling() {
- use rt::test::run_in_newsched_task;
- do run_in_newsched_task {
+ use rt::test::run_in_uv_task;
+ do run_in_uv_task {
let result: Result<(),()> = do try {
// Middle sibling exits - does eldest's failure propagate to youngest?
do spawn { // linked
#[test]
fn test_unnamed_task() {
- use rt::test::run_in_newsched_task;
+ use rt::test::run_in_uv_task;
- do run_in_newsched_task {
+ do run_in_uv_task {
do spawn {
do with_task_name |name| {
assert!(name.is_none());
#[test]
fn test_owned_named_task() {
- use rt::test::run_in_newsched_task;
+ use rt::test::run_in_uv_task;
- do run_in_newsched_task {
+ do run_in_uv_task {
let mut t = task();
t.name(~"ada lovelace");
do t.spawn {
#[test]
fn test_static_named_task() {
- use rt::test::run_in_newsched_task;
+ use rt::test::run_in_uv_task;
- do run_in_newsched_task {
+ do run_in_uv_task {
let mut t = task();
t.name("ada lovelace");
do t.spawn {
#[test]
fn test_send_named_task() {
- use rt::test::run_in_newsched_task;
+ use rt::test::run_in_uv_task;
- do run_in_newsched_task {
+ do run_in_uv_task {
let mut t = task();
t.name("ada lovelace".into_send_str());
do t.spawn {
#[test]
fn test_simple_newsched_spawn() {
- use rt::test::run_in_newsched_task;
+ use rt::test::run_in_uv_task;
- do run_in_newsched_task {
+ do run_in_uv_task {
spawn(||())
}
}
#[ignore(reason = "linked failure")]
#[test]
fn test_spawn_watched() {
- use rt::test::run_in_newsched_task;
- do run_in_newsched_task {
+ use rt::test::run_in_uv_task;
+ do run_in_uv_task {
let result = do try {
let mut t = task();
t.unlinked();
#[ignore(reason = "linked failure")]
#[test]
fn test_indestructible() {
- use rt::test::run_in_newsched_task;
- do run_in_newsched_task {
+ use rt::test::run_in_uv_task;
+ do run_in_uv_task {
let result = do try {
let mut t = task();
t.watched();
}
}
}
+
+ pub unsafe fn signal(&self) {
+ rust_signal_little_lock(self.l);
+ }
+
+ pub unsafe fn lock_and_wait(&self, f: &fn() -> bool) {
+ do atomically {
+ rust_lock_little_lock(self.l);
+ do (|| {
+ if f() {
+ rust_wait_little_lock(self.l);
+ }
+ }).finally {
+ rust_unlock_little_lock(self.l);
+ }
+ }
+ }
}
struct ExData<T> {
}
}
+ #[inline]
+ pub unsafe fn hold_and_signal(&self, f: &fn(x: &mut T)) {
+ let rec = self.x.get();
+ do (*rec).lock.lock {
+ if (*rec).failed {
+ fail!("Poisoned Exclusive::new - another task failed inside!");
+ }
+ (*rec).failed = true;
+ f(&mut (*rec).data);
+ (*rec).failed = false;
+ (*rec).lock.signal();
+ }
+ }
+
+ #[inline]
+ pub unsafe fn hold_and_wait(&self, f: &fn(x: &T) -> bool) {
+ let rec = self.x.get();
+ do (*rec).lock.lock_and_wait {
+ if (*rec).failed {
+ fail!("Poisoned Exclusive::new - another task failed inside!");
+ }
+ (*rec).failed = true;
+ let result = f(&(*rec).data);
+ (*rec).failed = false;
+ result
+ }
+ }
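+
+ // Together these form a tiny condition-variable protocol (a sketch of
+ // the intended pairing, as BasicLoop and BasicRemote use it): the
+ // waiter blocks while its predicate holds; the signaller mutates the
+ // data and then wakes the waiter.
+ //
+ //     // waiter (e.g. an event loop going to sleep):
+ //     do messages.hold_and_wait |msgs| { msgs.len() == 0 }
+ //     // signaller (e.g. a remote callback firing):
+ //     do messages.hold_and_signal |msgs| { msgs.push(RunRemote(id)); }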
+
pub fn unwrap(self) -> T {
let Exclusive { x: x } = self;
// Someday we might need to unkillably unwrap an Exclusive, but not today.
externfn!(fn rust_destroy_little_lock(lock: rust_little_lock))
externfn!(fn rust_lock_little_lock(lock: rust_little_lock))
externfn!(fn rust_unlock_little_lock(lock: rust_little_lock))
+externfn!(fn rust_signal_little_lock(lock: rust_little_lock))
+externfn!(fn rust_wait_little_lock(lock: rust_little_lock))
#[cfg(test)]
mod tests {
lock->unlock();
}
+extern "C" void
+rust_wait_little_lock(lock_and_signal *lock) {
+ lock->wait();
+}
+
+extern "C" void
+rust_signal_little_lock(lock_and_signal *lock) {
+ lock->signal();
+}
+
typedef void(startfn)(void*, void*);
class raw_thread: public rust_thread {
return uv_tty_get_winsize(tty, width, height);
}
-extern "C" uv_handle_type
+extern "C" int
rust_uv_guess_handle(int fd) {
return uv_guess_handle(fd);
}
rust_destroy_little_lock
rust_lock_little_lock
rust_unlock_little_lock
+rust_signal_little_lock
+rust_wait_little_lock
tdefl_compress_mem_to_heap
tinfl_decompress_mem_to_heap
rust_uv_ip4_port
--- /dev/null
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// error-pattern:explicit failure
+
+fn main() {
+ &fail!()
+}
--- /dev/null
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// In theory, it doesn't matter what order destructors are run in for Rust,
+// because we have explicit ownership of values, meaning that there's no need
+// to run one before another. With unsafe code, however, there may be a safe
+// interface which relies on fields having their destructors run in a particular
+// order. At the time of this writing, std::rt::sched::Scheduler is an example
+// of a structure which contains unsafe handles to FFI-like types, and the
+// destruction order of the fields matters in the sense that some handles need
+// to get destroyed before others.
+//
+// In C++, destruction order happens bottom-to-top in order of field
+// declarations, but we currently run them top-to-bottom. I don't think the
+// order really matters that much as long as we define what it is.
+
+struct A;
+struct B;
+struct C {
+ a: A,
+ b: B,
+}
+
+static mut hit: bool = false;
+
+impl Drop for A {
+ fn drop(&mut self) {
+ unsafe {
+ assert!(!hit);
+ hit = true;
+ }
+ }
+}
+
+impl Drop for B {
+ fn drop(&mut self) {
+ unsafe {
+ assert!(hit);
+ }
+ }
+}
+
+pub fn main() {
+ let _c = C { a: A, b: B };
+}