// src/rt/rust_kernel.cpp
// fix how we walk functions to match new closure fmt
1 #include "rust_internal.h"
2 #include "rust_util.h"
3
4 #define KLOG_(...)                              \
5     KLOG(this, kern, __VA_ARGS__)
6 #define KLOG_ERR_(field, ...)                   \
7     KLOG_LVL(this, field, log_err, __VA_ARGS__)
8
// Construct the kernel: set up the kernel-lifetime memory region and
// logger, record the owning srv and thread count, then spawn one
// scheduler per requested thread.
rust_kernel::rust_kernel(rust_srv *srv, size_t num_threads) :
    _region(srv, true),
    _log(srv, NULL),
    srv(srv),
    max_id(0),
    num_threads(num_threads),
    rval(0),
    live_tasks(0),
    env(srv->env)
{
    // Seed the kernel-wide ISAAC RNG; create_task uses it to pick a
    // scheduler for each new task.
    isaac_init(this, &rctx);
    create_schedulers();
}
22
23 rust_scheduler *
24 rust_kernel::create_scheduler(int id) {
25     _kernel_lock.lock();
26     rust_srv *srv = this->srv->clone();
27     rust_scheduler *sched =
28         new (this, "rust_scheduler") rust_scheduler(this, srv, id);
29     KLOG_("created scheduler: " PTR ", id: %d, index: %d",
30           sched, id, sched->list_index);
31     _kernel_lock.signal_all();
32     _kernel_lock.unlock();
33     return sched;
34 }
35
36 void
37 rust_kernel::destroy_scheduler(rust_scheduler *sched) {
38     _kernel_lock.lock();
39     KLOG_("deleting scheduler: " PTR ", name: %s, index: %d",
40         sched, sched->name, sched->list_index);
41     rust_srv *srv = sched->srv;
42     delete sched;
43     delete srv;
44     _kernel_lock.signal_all();
45     _kernel_lock.unlock();
46 }
47
48 void rust_kernel::create_schedulers() {
49     KLOG_("Using %d scheduler threads.", num_threads);
50
51     for(size_t i = 0; i < num_threads; ++i) {
52         threads.push(create_scheduler(i));
53     }
54 }
55
56 void rust_kernel::destroy_schedulers() {
57     for(size_t i = 0; i < num_threads; ++i) {
58         destroy_scheduler(threads[i]);
59     }
60 }
61
62 void
63 rust_kernel::log_all_scheduler_state() {
64     for(size_t i = 0; i < num_threads; ++i) {
65         threads[i]->log_state();
66     }
67 }
68
/**
 * Checks for simple deadlocks.
 */
bool
rust_kernel::is_deadlocked() {
    // No detection is implemented yet; always report "no deadlock".
    return false;
}
76
// Format the varargs message into a stack buffer and emit it through the
// kernel logger at the given level. Messages longer than BUF_BYTES - 1
// are truncated by vsnprintf.
void
rust_kernel::log(uint32_t level, char const *fmt, ...) {
    char buf[BUF_BYTES];
    va_list args;
    va_start(args, fmt);
    vsnprintf(buf, sizeof(buf), fmt, args);
    _log.trace_ln(NULL, level, buf);
    va_end(args);
}
86
87 void
88 rust_kernel::fatal(char const *fmt, ...) {
89     char buf[BUF_BYTES];
90     va_list args;
91     va_start(args, fmt);
92     vsnprintf(buf, sizeof(buf), fmt, args);
93     _log.trace_ln(NULL, (uint32_t)0, buf);
94     exit(1);
95     va_end(args);
96 }
97
// Schedulers are created in the constructor; tear them down here.
rust_kernel::~rust_kernel() {
    destroy_schedulers();
}
101
102 void *
103 rust_kernel::malloc(size_t size, const char *tag) {
104     return _region.malloc(size, tag);
105 }
106
107 void *
108 rust_kernel::realloc(void *mem, size_t size) {
109     return _region.realloc(mem, size);
110 }
111
// Return an allocation to the kernel's memory region.
void rust_kernel::free(void *mem) {
    _region.free(mem);
}
115
116 void
117 rust_kernel::signal_kernel_lock() {
118     _kernel_lock.lock();
119     _kernel_lock.signal_all();
120     _kernel_lock.unlock();
121 }
122
123 int rust_kernel::start_task_threads()
124 {
125     for(size_t i = 0; i < num_threads; ++i) {
126         rust_scheduler *thread = threads[i];
127         thread->start();
128     }
129
130     for(size_t i = 0; i < num_threads; ++i) {
131         rust_scheduler *thread = threads[i];
132         thread->join();
133     }
134
135     return rval;
136 }
137
138 void
139 rust_kernel::fail() {
140     // FIXME: On windows we're getting "Application has requested the
141     // Runtime to terminate it in an unusual way" when trying to shutdown
142     // cleanly.
143 #if defined(__WIN32__)
144     exit(rval);
145 #endif
146     for(size_t i = 0; i < num_threads; ++i) {
147         rust_scheduler *thread = threads[i];
148         thread->kill_all_tasks();
149     }
150 }
151
152 rust_task_id
153 rust_kernel::create_task(rust_task *spawner, const char *name) {
154     scoped_lock with(_kernel_lock);
155     rust_scheduler *thread = threads[isaac_rand(&rctx) % num_threads];
156     rust_task *t = thread->create_task(spawner, name);
157     t->user.id = max_id++;
158     task_table.put(t->user.id, t);
159     return t->user.id;
160 }
161
162 rust_task *
163 rust_kernel::get_task_by_id(rust_task_id id) {
164     scoped_lock with(_kernel_lock);
165     rust_task *task = NULL;
166     // get leaves task unchanged if not found.
167     task_table.get(id, &task);
168     if(task) {
169         if(task->get_ref_count() == 0) {
170             // this means the destructor is running, since the destructor
171             // grabs the kernel lock to unregister the task. Pretend this
172             // doesn't actually exist.
173             return NULL;
174         }
175         else {
176             task->ref();
177         }
178     }
179     return task;
180 }
181
// Drop the id -> task mapping under the kernel lock. Called when a task
// is being unregistered.
void
rust_kernel::release_task_id(rust_task_id id) {
    scoped_lock with(_kernel_lock);
    task_table.remove(id);
}
187
188 void rust_kernel::wakeup_schedulers() {
189     for(size_t i = 0; i < num_threads; ++i) {
190         threads[i]->lock.signal_all();
191     }
192 }
193
#ifdef __WIN32__
// Assert that a Win32 API call named `fn` succeeded (`ok` is its BOOL
// result). On failure, format the GetLastError() message, log it, free
// the system-allocated buffer, and trip the kernel invariant check.
void
rust_kernel::win32_require(LPCTSTR fn, BOOL ok) {
    if (!ok) {
        LPTSTR buf;
        DWORD err = GetLastError();
        // Let the system allocate and fill the human-readable message
        // for `err`; we must release it with LocalFree below.
        FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, err,
                      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                      (LPTSTR) &buf, 0, NULL );
        KLOG_ERR_(dom, "%s failed with error %ld: %s", fn, err, buf);
        LocalFree((HLOCAL)buf);
        // I() is the runtime's invariant assertion; `ok` is false here.
        I(this, ok);
    }
}
#endif
212
213 //
214 // Local Variables:
215 // mode: C++
216 // fill-column: 78;
217 // indent-tabs-mode: nil
218 // c-basic-offset: 4
219 // buffer-file-coding-system: utf-8-unix
220 // End:
221 //