/nd/
/llvm/
version.md
-*.tex
keywords.md
x86_64-apple-darwin/
x86_64-unknown-linux-gnu/
BASE_DOC_OPTS := --standalone --toc --number-sections
HTML_OPTS = $(BASE_DOC_OPTS) --to=html5 --section-divs --css=rust.css \
- --include-before-body=doc/version_info.html --include-in-header=doc/favicon.inc
-TEX_OPTS = $(BASE_DOC_OPTS) --include-before-body=doc/version.md --to=latex
+ --include-before-body=doc/version_info.html \
+ --include-in-header=doc/favicon.inc --include-after-body=doc/footer.inc
+TEX_OPTS = $(BASE_DOC_OPTS) --include-before-body=doc/version.md \
+ --from=markdown --include-before-body=doc/footer.tex --to=latex
EPUB_OPTS = $(BASE_DOC_OPTS) --to=epub
D := $(S)src/doc
@$(call E, cp: $@)
$(Q)cp -a $< $@ 2> /dev/null
+HTML_DEPS += doc/favicon.inc
+doc/favicon.inc: $(D)/favicon.inc | doc/
+ @$(call E, cp: $@)
+ $(Q)cp -a $< $@ 2> /dev/null
+
doc/full-toc.inc: $(D)/full-toc.inc | doc/
@$(call E, cp: $@)
$(Q)cp -a $< $@ 2> /dev/null
-HTML_DEPS += doc/favicon.inc
-doc/favicon.inc: $(D)/favicon.inc | doc/
+HTML_DEPS += doc/footer.inc
+doc/footer.inc: $(D)/footer.inc | doc/
+ @$(call E, cp: $@)
+ $(Q)cp -a $< $@ 2> /dev/null
+
+doc/footer.tex: $(D)/footer.tex | doc/
@$(call E, cp: $@)
$(Q)cp -a $< $@ 2> /dev/null
$(CFG_PANDOC) $(HTML_OPTS) --include-in-header=doc/full-toc.inc --output=$@
DOCS += doc/rust.tex
-doc/rust.tex: $(D)/rust.md doc/version.md | doc/
+doc/rust.tex: $(D)/rust.md doc/footer.tex doc/version.md | doc/
@$(call E, pandoc: $@)
$(Q)$(CFG_NODE) $(D)/prep.js $< | \
$(CFG_PANDOC) $(TEX_OPTS) --output=$@
$(CFG_PANDOC) $(HTML_OPTS) --output=$@
DOCS += doc/tutorial.tex
-doc/tutorial.tex: $(D)/tutorial.md doc/version.md
+doc/tutorial.tex: $(D)/tutorial.md doc/footer.tex doc/version.md
@$(call E, pandoc: $@)
$(Q)$(CFG_NODE) $(D)/prep.js $< | \
$(CFG_PANDOC) $(TEX_OPTS) --output=$@
--- /dev/null
+<footer><p>
+Copyright © 2011-2014 The Rust Project Developers. Licensed under the
+<a href="http://www.apache.org/licenses/LICENSE-2.0">Apache License, Version 2.0</a>
+or the <a href="http://opensource.org/licenses/MIT">MIT license</a>, at your option.
+</p><p>
+This file may not be copied, modified, or distributed except according to those terms.
+</p></footer>
--- /dev/null
+Copyright © 2011-2014 The Rust Project Developers. Licensed under the
+\href{http://www.apache.org/licenses/LICENSE-2.0}{Apache License,
+Version 2.0} or the \href{http://opensource.org/licenses/MIT}{MIT
+license}, at your option.
+
+This file may not be copied, modified, or distributed except according
+to those terms.
#[start]
fn start(argc: int, argv: **u8) -> int {
-    green::start(argc, argv, proc() {
-        main();
-    })
+    // `green::start` now takes the entry function directly instead of a
+    // proc wrapper — presumably an API change in green::start; see its docs.
+    green::start(argc, argv, main)
}
fn main() {}
body {
margin: 0 auto;
padding: 0 15px;
- margin-bottom: 4em;
font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
font-size: 14px;
color: #333;
margin: 0 0 10px;
}
+/* Page footer (license notice): small italic text, separated from the body
+   by a top rule and pushed well below the content. */
+footer {
+    border-top: 1px solid #ddd;
+    font-size: 12px;
+    font-style: italic;
+    padding-top: 4px;
+    margin-top: 4em;
+    margin-bottom: 1em;
+}
+
/* Links layout
========================================================================== */
a {
//!
//! This library provides M:N threading for rust programs. Internally this has
//! the implementation of a green scheduler along with context switching and a
-//! stack-allocation strategy.
+//! stack-allocation strategy. This can be optionally linked in to rust
+//! programs in order to provide M:N functionality inside of 1:1 programs.
//!
-//! This can be optionally linked in to rust programs in order to provide M:N
-//! functionality inside of 1:1 programs.
+//! # Architecture
+//!
+//! An M:N scheduling library implies that there are N OS threads upon which M
+//! "green threads" are multiplexed. In other words, a set of green threads are
+//! all run inside a pool of OS threads.
+//!
+//! With this design, you can achieve _concurrency_ by spawning many green
+//! threads, and you can achieve _parallelism_ by running the green threads
+//! simultaneously on multiple OS threads. Each OS thread is a candidate for
+//! being scheduled on a different core (the source of parallelism), and then
+//! all of the green threads cooperatively schedule amongst one another (the
+//! source of concurrency).
+//!
+//! ## Schedulers
+//!
+//! In order to coordinate among green threads, each OS thread is primarily
+//! running something which we call a Scheduler. Whenever a reference to a
+//! Scheduler is made, it is synonymous to referencing one OS thread. Each
+//! scheduler is bound to one and exactly one OS thread, and the thread that it
+//! is bound to never changes.
+//!
+//! Each scheduler is connected to a pool of other schedulers (a `SchedPool`)
+//! which is the thread pool term from above. A pool of schedulers all share the
+//! work that they create. Furthermore, whenever a green thread is created (also
+//! synonymously referred to as a green task), it is associated with a
+//! `SchedPool` forevermore. A green thread cannot leave its scheduler pool.
+//!
+//! Schedulers can have at most one green thread running on them at a time. When
+//! a scheduler is asleep on its event loop, there are no green tasks running on
+//! the OS thread or the scheduler. The term "context switch" is used for when
+//! the running green thread is swapped out, but this simply changes the one
+//! green thread which is running on the scheduler.
+//!
+//! ## Green Threads
+//!
+//! A green thread can largely be summarized by a stack and a register context.
+//! Whenever a green thread is spawned, it allocates a stack, and then prepares
+//! a register context for execution. The green task may be executed across
+//! multiple OS threads, but it will always use the same stack and it will carry
+//! its register context across OS threads.
+//!
+//! Each green thread is cooperatively scheduled with other green threads.
+//! Primarily, this means that there is no pre-emption of a green thread. The
+//! major consequence of this design is that a green thread stuck in an infinite
+//! loop will prevent all other green threads from running on that particular
+//! scheduler.
+//!
+//! Scheduling events for green threads occur on communication and I/O
+//! boundaries. For example, if a green task blocks waiting for a message on a
+//! channel some other green thread can now run on the scheduler. This also has
+//! the consequence that until a green thread performs any form of scheduling
+//! event, it will be running on the same OS thread (unconditionally).
+//!
+//! ## Work Stealing
+//!
+//! With a pool of schedulers, a new green task has a number of options when
+//! deciding where to run initially. The current implementation uses a concept
+//! called work stealing in order to spread out work among schedulers.
+//!
+//! In a work-stealing model, each scheduler maintains a local queue of tasks to
+//! run, and this queue is stolen from by other schedulers. Implementation-wise,
+//! work stealing has some hairy parts, but from a user perspective, work
+//! stealing simply implies that with M green threads and N schedulers where
+//! M > N, it is very likely that all schedulers will be busy executing work.
+//!
+//! # Considerations when using libgreen
+//!
+//! An M:N runtime has both pros and cons, and there is no one answer as to
+//! whether M:N or 1:1 is appropriate to use. As always, there are many
+//! advantages and disadvantages between the two. Regardless of the workload,
+//! however, there are some aspects of using green threads which you should be
+//! aware of:
+//!
+//! * The largest concern when using libgreen is interoperating with native
+//! code. Care should be taken when calling native code that will block the OS
+//! thread as it will prevent further green tasks from being scheduled on the
+//! OS thread.
+//!
+//! * Native code using thread-local-storage should be approached
+//! with care. Green threads may migrate among OS threads at any time, so
+//! native libraries using thread-local state may not always work.
+//!
+//! * Native synchronization primitives (e.g. pthread mutexes) will also not
+//! work for green threads. The reason for this is because native primitives
+//! often operate on an _OS thread_ granularity whereas green threads are
+//! operating on a more granular unit of work.
+//!
+//! * A green threading runtime is not fork-safe. If the process forks(), it
+//! cannot expect to make reasonable progress by continuing to use green
+//! threads.
+//!
+//! Note that these concerns do not mean that operating with native code is a
+//! lost cause. These are simply just concerns which should be considered when
+//! invoking native code.
+//!
+//! # Starting with libgreen
+//!
+//! ```rust
+//! extern mod green;
+//!
+//! #[start]
+//! fn start(argc: int, argv: **u8) -> int { green::start(argc, argv, main) }
+//!
+//! fn main() {
+//! // this code is running in a pool of schedulers
+//! }
+//! ```
+//!
+//! # Using a scheduler pool
+//!
+//! ```rust
+//! use std::task::TaskOpts;
+//! use green::{SchedPool, PoolConfig};
+//! use green::sched::{PinnedTask, TaskFromFriend};
+//!
+//! let config = PoolConfig::new();
+//! let mut pool = SchedPool::new(config);
+//!
+//! // Spawn tasks into the pool of schedulers
+//! pool.spawn(TaskOpts::new(), proc() {
+//! // this code is running inside the pool of schedulers
+//!
+//! spawn(proc() {
+//! // this code is also running inside the same scheduler pool
+//! });
+//! });
+//!
+//! // Dynamically add a new scheduler to the scheduler pool. This adds another
+//! // OS thread that green threads can be multiplexed on to.
+//! let mut handle = pool.spawn_sched();
+//!
+//! // Pin a task to the spawned scheduler
+//! let task = pool.task(TaskOpts::new(), proc() { /* ... */ });
+//! handle.send(PinnedTask(task));
+//!
+//! // Schedule a task on this new scheduler
+//! let task = pool.task(TaskOpts::new(), proc() { /* ... */ });
+//! handle.send(TaskFromFriend(task));
+//!
+//! // Handles keep schedulers alive, so be sure to drop all handles before
+//! // destroying the sched pool
+//! drop(handle);
+//!
+//! // Required to shut down this scheduler pool.
+//! // The task will fail if `shutdown` is not called.
+//! pool.shutdown();
+//! ```
#[crate_id = "green#0.10-pre"];
#[license = "MIT/ASL2"];
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! The native runtime crate
+//! The native I/O and threading crate
//!
//! This crate contains an implementation of 1:1 scheduling for a "native"
//! runtime. In addition, all I/O provided by this crate is the thread blocking
//! version of I/O.
+//!
+//! # Starting with libnative
+//!
+//! ```rust
+//! extern mod native;
+//!
+//! #[start]
+//! fn start(argc: int, argv: **u8) -> int { native::start(argc, argv, main) }
+//!
+//! fn main() {
+//! // this code is running on the main OS thread
+//! }
+//! ```
+//!
+//! # Force spawning a native task
+//!
+//! ```rust
+//! extern mod native;
+//!
+//! fn main() {
+//! // We're not sure whether this main function is run in 1:1 or M:N mode.
+//!
+//! native::task::spawn(proc() {
+//! // this code is guaranteed to be run on a native thread
+//! });
+//! }
+//! ```
#[crate_id = "native#0.10-pre"];
#[license = "MIT/ASL2"];
}
}
+// In-place, stable insertion sort driven by `compare`, used for short slices
+// so no scratch buffer is allocated. Elements are moved with raw pointer
+// copies, so `T` needs no Clone bound; `cast::forget(tmp)` prevents a double
+// drop after `tmp`'s bytes are copied back into the slice.
+fn insertion_sort<T>(v: &mut [T], compare: |&T, &T| -> Ordering) {
+    let len = v.len() as int;
+    let buf_v = v.as_mut_ptr();
+
+    // 1 <= i < len;
+    for i in range(1, len) {
+        // j satisfies: 0 <= j <= i;
+        let mut j = i;
+        unsafe {
+            // `i` is in bounds.
+            let read_ptr = buf_v.offset(i) as *T;
+
+            // find where to insert, we need to do strict <,
+            // rather than <=, to maintain stability.
+
+            // 0 <= j - 1 < len, so .offset(j - 1) is in bounds.
+            while j > 0 &&
+                    compare(&*read_ptr, &*buf_v.offset(j - 1)) == Less {
+                j -= 1;
+            }
+
+            // shift everything to the right, to make space to
+            // insert this value.
+
+            // j + 1 could be `len` (for the last `i`), but in
+            // that case, `i == j` so we don't copy. The
+            // `.offset(j)` is always in bounds.
+
+            if i != j {
+                let tmp = ptr::read_ptr(read_ptr);
+                ptr::copy_memory(buf_v.offset(j + 1),
+                                 buf_v.offset(j),
+                                 (i - j) as uint);
+                ptr::copy_nonoverlapping_memory(buf_v.offset(j),
+                                                &tmp as *T,
+                                                1);
+                cast::forget(tmp);
+            }
+        }
+    }
+}
+
fn merge_sort<T>(v: &mut [T], compare: |&T, &T| -> Ordering) {
// warning: this wildly uses unsafe.
- static INSERTION: uint = 8;
+ static BASE_INSERTION: uint = 32;
+ static LARGE_INSERTION: uint = 16;
+
+ // FIXME #12092: smaller insertion runs seems to make sorting
+ // vectors of large elements a little faster on some platforms,
+ // but hasn't been tested/tuned extensively
+ let insertion = if size_of::<T>() <= 16 {
+ BASE_INSERTION
+ } else {
+ LARGE_INSERTION
+ };
let len = v.len();
+ // short vectors get sorted in-place via insertion sort to avoid allocations
+ if len <= insertion {
+ insertion_sort(v, compare);
+ return;
+ }
+
// allocate some memory to use as scratch memory, we keep the
// length 0 so we can keep shallow copies of the contents of `v`
// without risking the dtors running on an object twice if
// We could hardcode the sorting comparisons here, and we could
// manipulate/step the pointers themselves, rather than repeatedly
// .offset-ing.
- for start in range_step(0, len, INSERTION) {
- // start <= i <= len;
- for i in range(start, cmp::min(start + INSERTION, len)) {
+ for start in range_step(0, len, insertion) {
+ // start <= i < len;
+ for i in range(start, cmp::min(start + insertion, len)) {
// j satisfies: start <= j <= i;
let mut j = i as int;
unsafe {
}
// step 2. merge the sorted runs.
- let mut width = INSERTION;
+ let mut width = insertion;
while width < len {
// merge the sorted runs of length `width` in `buf_dat` two at
// a time, placing the result in `buf_tmp`.
});
bh.bytes = (v.len() * mem::size_of_val(&v[0])) as u64;
}
+
+    // 32-byte element type, to benchmark sorting of "large" elements
+    // (size_of > 16 selects LARGE_INSERTION in merge_sort above).
+    type BigSortable = (u64,u64,u64,u64);
+
+    // 5 elements: entirely within the insertion-sort fast path.
+    #[bench]
+    fn sort_big_random_small(bh: &mut BenchHarness) {
+        let mut rng = weak_rng();
+        bh.iter(|| {
+            let mut v: ~[BigSortable] = rng.gen_vec(5);
+            v.sort();
+        });
+        bh.bytes = 5 * mem::size_of::<BigSortable>() as u64;
+    }
+
+    // 100 elements: exercises the merge step over insertion-sorted runs.
+    #[bench]
+    fn sort_big_random_medium(bh: &mut BenchHarness) {
+        let mut rng = weak_rng();
+        bh.iter(|| {
+            let mut v: ~[BigSortable] = rng.gen_vec(100);
+            v.sort();
+        });
+        bh.bytes = 100 * mem::size_of::<BigSortable>() as u64;
+    }
+
+    #[bench]
+    fn sort_big_random_large(bh: &mut BenchHarness) {
+        let mut rng = weak_rng();
+        bh.iter(|| {
+            let mut v: ~[BigSortable] = rng.gen_vec(10000);
+            v.sort();
+        });
+        bh.bytes = 10000 * mem::size_of::<BigSortable>() as u64;
+    }
+
+    // Already-sorted input: best case for a stable merge sort.
+    #[bench]
+    fn sort_big_sorted(bh: &mut BenchHarness) {
+        let mut v = vec::from_fn(10000u, |i| (i, i, i, i));
+        bh.iter(|| {
+            v.sort();
+        });
+        bh.bytes = (v.len() * mem::size_of_val(&v[0])) as u64;
+    }
}