1 // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Tasks implemented on top of OS threads
13 //! This module contains the implementation of the 1:1 threading module required
14 //! by rust tasks. This implements the necessary API traits laid out by std::rt
15 //! in order to spawn new tasks and deschedule the current task.
19 use std::rt::bookkeeping;
20 use std::rt::local::Local;
21 use std::rt::mutex::NativeMutex;
24 use std::rt::task::{Task, BlockedTask, TaskOpts};
25 use std::rt::thread::Thread;
30 use std::task::{TaskBuilder, Spawner};
32 /// Creates a new Task which is ready to execute as a 1:1 task.
// Takes the (lo, hi) stack bounds of the OS thread this task will run on and
// records them in the task's runtime ops so `stack_bounds()` can report them.
33 pub fn new(stack_bounds: (uint, uint)) -> Box<Task> {
34 let mut task = box Task::new();
// NOTE(review): the binding of `ops` (original line 35, presumably
// `let mut ops = ops();`) is not visible in this excerpt — confirm upstream.
36 ops.stack_bounds = stack_bounds;
// Attach the 1:1 runtime ops to the freshly created task.
37 task.put_runtime(ops);
// Builds the per-task `Ops` glue structure with a fresh native mutex and a
// local I/O factory. The remaining fields (original lines 44, 47-48, e.g.
// `awoken` and `stack_bounds`) are initialized on lines missing from this
// excerpt; the comment below suggests they hold placeholder values that
// callers overwrite.
41 fn ops() -> Box<Ops> {
43 lock: unsafe { NativeMutex::new() },
45 io: io::IoFactory::new(),
46 // these *should* get overwritten
51 /// Spawns a function with the default configuration
// Thin convenience wrapper: forwards to `spawn_opts` with an all-default
// `TaskOpts` (no name, default stack size, no exit callback).
52 #[deprecated = "use the native method of NativeTaskBuilder instead"]
53 pub fn spawn(f: proc():Send) {
54 spawn_opts(TaskOpts { name: None, stack_size: None, on_exit: None }, f)
57 /// Spawns a new task given the configuration options and a procedure to run
// Creates a boxed `Task`, applies the caller's name/stack-size/on-exit
// options, registers the task with the runtime bookkeeping, and launches it
// on a brand-new OS thread via `Thread::spawn_stack`.
59 #[deprecated = "use the native method of NativeTaskBuilder instead"]
60 pub fn spawn_opts(opts: TaskOpts, f: proc():Send) {
61 let TaskOpts { name, stack_size, on_exit } = opts;
63 let mut task = box Task::new();
// NOTE(review): the line applying `name` to the task (original line 64) is
// missing from this excerpt.
65 task.death.on_exit = on_exit;
// Fall back to the runtime's minimum stack size when none was requested.
67 let stack = stack_size.unwrap_or(rt::min_stack());
71 // Note that this increment must happen *before* the spawn in order to
72 // guarantee that if this task exits it will always end up waiting for the
73 // spawned task to exit.
74 bookkeeping::increment();
76 // Spawning a new OS thread guarantees that __morestack will never get
77 // triggered, but we must manually set up the actual stack bounds once this
78 // function starts executing. This raises the lower limit by a bit because
79 // by the time that this function is executing we've already consumed at
80 // least a little bit of stack (we don't know the exact byte address at
81 // which our stack started).
82 Thread::spawn_stack(stack, proc() {
// Use the address of a local as an approximation of the top of this
// thread's stack.
83 let something_around_the_top_of_the_stack = 1;
84 let addr = &something_around_the_top_of_the_stack as *const int;
85 let my_stack = addr as uint;
// The `+ 1024` fudge accounts for stack already consumed before this
// point, as described in the comment above.
87 stack::record_stack_bounds(my_stack - stack + 1024, my_stack);
// NOTE(review): the binding of `ops` (original lines 88-89) is missing
// from this excerpt — presumably `let mut ops = ops();`.
90 ops.stack_bounds = (my_stack - stack + 1024, my_stack);
94 task.put_runtime(ops);
// Run the user's proc inside the task; `destroy()` tears the task down
// and `drop` discards the result.
95 drop(task.run(|| { f.take_unwrap()() }).destroy());
// Balance the `increment()` above once the spawned task has finished.
96 bookkeeping::decrement();
100 /// A spawner for native tasks
// Zero-sized marker type; its `Spawner` implementation launches tasks on
// dedicated OS threads (1:1 scheduling).
101 pub struct NativeSpawner;
// Implements the std `Spawner` trait for native (1:1) tasks.
103 impl Spawner for NativeSpawner {
// NOTE(review): the method body (original lines 105+) is missing from this
// excerpt — presumably it forwards to `spawn_opts(opts, f)`; confirm.
104 fn spawn(self, opts: TaskOpts, f: proc():Send) {
109 /// An extension trait adding a `native` configuration method to `TaskBuilder`.
110 pub trait NativeTaskBuilder {
// Converts this builder into one that spawns via `NativeSpawner`.
111 fn native(self) -> TaskBuilder<NativeSpawner>;
// Blanket implementation: any `TaskBuilder` can be switched to native
// spawning by swapping in the `NativeSpawner`.
114 impl<S: Spawner> NativeTaskBuilder for TaskBuilder<S> {
115 fn native(self) -> TaskBuilder<NativeSpawner> {
116 self.spawner(NativeSpawner)
120 // This structure is the glue between channels and the 1:1 scheduling mode. This
121 // structure is allocated once per task.
// NOTE(review): the `struct Ops {` header line (original line 122) is missing
// from this excerpt; the fields below belong to it.
123 lock: NativeMutex, // native synchronization
124 awoken: bool, // used to prevent spurious wakeups
125 io: io::IoFactory, // local I/O factory
127 // This field holds the known bounds of the stack in (lo, hi) form. Not all
128 // native tasks necessarily know their precise bounds, hence this is
130 stack_bounds: (uint, uint),
// Runtime trait implementation for 1:1 (native) tasks.
133 impl rt::Runtime for Ops {
134 fn yield_now(self: Box<Ops>, mut cur_task: Box<Task>) {
135 // put the task back in TLS and then invoke the OS thread yield
136 cur_task.put_runtime(self);
137 Local::put(cur_task);
// NOTE(review): the actual OS-thread yield call (original lines 138-139)
// is missing from this excerpt.
141 fn maybe_yield(self: Box<Ops>, mut cur_task: Box<Task>) {
142 // just put the task back in TLS, on OS threads we never need to
143 // opportunistically yield b/c the OS will do that for us (preemption)
144 cur_task.put_runtime(self);
145 Local::put(cur_task);
// Erases the concrete `Ops` type behind `Box<Any>` (body on lines missing
// from this excerpt).
148 fn wrap(self: Box<Ops>) -> Box<Any> {
// Reports the (lo, hi) stack bounds recorded at task creation.
152 fn stack_bounds(&self) -> (uint, uint) { self.stack_bounds }
// Native tasks own a whole OS thread, so blocking it is always allowed.
154 fn can_block(&self) -> bool { true }
156 // This function gets a little interesting. There are a few safety and
157 // ownership violations going on here, but this is all done in the name of
158 // shared state. Additionally, all of the violations are protected with a
159 // mutex, so in theory there are no races.
161 // The first thing we need to do is to get a pointer to the task's internal
162 // mutex. This address will not be changing (because the task is allocated
163 // on the heap). We must have this handle separately because the task will
164 // have its ownership transferred to the given closure. We're guaranteed,
165 // however, that this memory will remain valid because *this* is the current
166 // task's execution thread.
168 // The next weird part is where ownership of the task actually goes. We
169 // relinquish it to the `f` blocking function, but upon returning this
170 // function needs to replace the task back in TLS. There is no communication
171 // from the wakeup thread back to this thread about the task pointer, and
172 // there's really no need to. In order to get around this, we cast the task
173 // to a `uint` which is then used at the end of this function to cast back
174 // to a `Box<Task>` object. Naturally, this looks like it violates
175 // ownership semantics in that there may be two `Box<Task>` objects.
177 // The fun part is that the wakeup half of this implementation knows to
178 // "forget" the task on the other end. This means that the awakening half of
179 // things silently relinquishes ownership back to this thread, but not in a
180 // way that the compiler can understand. The task's memory is always valid
181 // for both tasks because these operations are all done inside of a mutex.
183 // You'll also find that if blocking fails (the `f` function hands the
184 // BlockedTask back to us), we will `mem::forget` the handles. The
185 // reasoning for this is the same logic as above in that the task silently
186 // transfers ownership via the `uint`, not through normal compiler
189 // On a mildly unrelated note, it should also be pointed out that OS
190 // condition variables are susceptible to spurious wakeups, which we need to
191 // be ready for. In order to accommodate for this fact, we have an extra
192 // `awoken` field which indicates whether we were actually woken up via some
193 // invocation of `reawaken`. This flag is only ever accessed inside the
194 // lock, so there's no need to make it atomic.
// NOTE(review): many interior lines of this method (original lines 196,
// 201-202, 205-206, 209-214, 216-217, 221, 230-244, 247-248) are missing
// from this excerpt — the `match` arms, condvar waits, and the single- vs
// multi-block branch structure must be confirmed against the full source.
195 fn deschedule(mut self: Box<Ops>,
197 mut cur_task: Box<Task>,
198 f: |BlockedTask| -> Result<(), BlockedTask>) {
// Raw pointer to our own Ops: needed because `self` is moved into the
// task on the next line but we still must touch the lock/awoken fields.
199 let me = &mut *self as *mut Ops;
200 cur_task.put_runtime(self);
// Raw pointer duplicate of the task so ownership can be reclaimed after
// the blocking closure has consumed the `Box<Task>`.
203 let cur_task_dupe = &mut *cur_task as *mut Task;
204 let task = BlockedTask::block(cur_task);
// Single-block path: take the lock, clear the wakeup flag, run `f`.
207 let guard = (*me).lock.lock();
208 (*me).awoken = false;
// Loop guards against spurious condvar wakeups (see comment above).
211 while !(*me).awoken {
// Blocking failed: the task was handed back, so wake it and "forget"
// the handle — ownership flows back through the raw pointer instead.
215 Err(task) => { mem::forget(task.wake()); }
// Multi-block (select) path: turn the blocked task into `times`
// selectable handles.
218 let iter = task.make_selectable(times);
219 let guard = (*me).lock.lock();
220 (*me).awoken = false;
222 // Apply the given closure to all of the "selectable tasks",
223 // bailing on the first one that produces an error. Note that
224 // care must be taken such that when an error is occurred, we
225 // may not own the task, so we may still have to wait for the
226 // task to become available. In other words, if task.wake()
227 // returns `None`, then someone else has ownership and we must
228 // wait for their signal.
229 match iter.map(f).filter_map(|a| a.err()).next() {
241 while !(*me).awoken {
245 // re-acquire ownership of the task
// SAFETY relies on the wakeup half having `mem::forget`-ed its handle
// (see comment block above); the mutex orders the two sides.
246 cur_task = mem::transmute(cur_task_dupe);
249 // put the task back in TLS, and everything is as it once was.
250 Local::put(cur_task);
253 // See the comments on `deschedule` for why the task is forgotten here, and
254 // why it's valid to do so.
255 fn reawaken(mut self: Box<Ops>, mut to_wake: Box<Task>) {
// Keep a raw pointer to our Ops so we can signal through its lock after
// `self` has been moved back into the task.
257 let me = &mut *self as *mut Ops;
258 to_wake.put_runtime(self);
// Deliberately leak the Box: the descheduled side reclaims ownership via
// its saved raw pointer (see `deschedule`).
259 mem::forget(to_wake);
260 let guard = (*me).lock.lock();
// NOTE(review): the lines setting `awoken = true` and signalling the
// condvar (original lines 261+) are missing from this excerpt.
// Spawns a sibling task: restores the current task to TLS, then delegates
// to the module-level `spawn_opts` to launch the new one on its own thread.
266 fn spawn_sibling(self: Box<Ops>,
267 mut cur_task: Box<Task>,
// NOTE(review): the remaining parameters (original lines 268-269,
// presumably `opts: TaskOpts, f: proc():Send`) are missing here.
270 cur_task.put_runtime(self);
271 Local::put(cur_task);
273 task::spawn_opts(opts, f);
// Hands out a borrowed handle to this task's local I/O factory.
276 fn local_io<'a>(&'a mut self) -> Option<rtio::LocalIo<'a>> {
277 Some(rtio::LocalIo::new(&mut self.io as &mut rtio::IoFactory))
283 use std::rt::local::Local;
284 use std::rt::task::{Task, TaskOpts};
286 use std::task::TaskBuilder;
287 use super::{spawn, spawn_opts, Ops, NativeTaskBuilder};
291 let (tx, rx) = channel();
300 let (tx, rx) = channel::<()>();
305 assert_eq!(rx.recv_opt(), Err(()));
310 let mut opts = TaskOpts::new();
311 opts.name = Some("test".into_maybe_owned());
312 opts.stack_size = Some(20 * 4096);
313 let (tx, rx) = channel();
314 opts.on_exit = Some(proc(r) tx.send(r));
315 spawn_opts(opts, proc() {});
316 assert!(rx.recv().is_ok());
// Verifies that a failing task reports Err through its on_exit callback.
320 fn smoke_opts_fail() {
321 let mut opts = TaskOpts::new();
322 let (tx, rx) = channel();
323 opts.on_exit = Some(proc(r) tx.send(r));
324 spawn_opts(opts, proc() { fail!() });
// The exit status delivered on the channel must be an error.
325 assert!(rx.recv().is_err());
330 let (tx, rx) = channel();
332 for _ in range(0u, 10) { task::deschedule(); }
// Exercises nested spawning: each level spawns a child and hands it a sender.
// NOTE(review): the spawn calls and receives between these channel creations
// (original lines 341, 343, 345+) are missing from this excerpt.
339 fn spawn_children() {
340 let (tx1, rx) = channel();
342 let (tx2, rx) = channel();
344 let (tx3, rx) = channel();
// Checks that a task spawned from a native task inherits the native runtime:
// the child takes its Task from TLS and confirms its runtime downcasts to Ops.
358 fn spawn_inherits() {
359 let (tx, rx) = channel();
// NOTE(review): the spawn call wrapping this body (original lines 360-361)
// is missing from this excerpt.
362 let mut task: Box<Task> = Local::take();
363 match task.maybe_take_runtime::<Ops>() {
// Put the runtime back so the task remains usable after the probe.
365 task.put_runtime(ops);
// Verifies the TaskBuilder `native()` extension: the proc's return value is
// propagated back through `try`.
377 fn test_native_builder() {
378 let res = TaskBuilder::new().native().try(proc() {
379 "Success!".to_string()
381 assert_eq!(res.ok().unwrap(), "Success!".to_string());