2 use rustc::middle::region;
3 use rustc::ty::layout::Size;
5 ////////////////////////////////////////////////////////////////////////////////
7 ////////////////////////////////////////////////////////////////////////////////
9 /// Information about a lock that is currently held.
10 #[derive(Clone, Debug)]
11 pub struct LockInfo<'tcx> {
12 /// Stores for which lifetimes (of the original write lock) we got
13 /// which suspensions.
14 suspended: HashMap<WriteLockId<'tcx>, Vec<region::Scope>>,
15 /// The current state of the lock that's actually effective.
// NOTE(review): the field documented just above is read elsewhere as
// `self.active` / `lock.active` and holds a `Lock` value (`NoLock`,
// `ReadLock(..)`, `WriteLock(..)`); its exact declaration is not visible in
// this excerpt — confirm against the full file.
19 /// Write locks are identified by a stack frame and an "abstract" (untyped) place.
20 /// It may be tempting to use the lifetime as identifier, but that does not work
22 /// * First of all, due to subtyping, the same lock may be referred to with different
24 /// * Secondly, different write locks may actually have the same lifetime. See `test2`
25 /// in `run-pass/many_shr_bor.rs`.
26 /// The Id is "captured" when the lock is first suspended; at that point, the borrow checker
27 /// considers the path frozen and hence the Id remains stable.
28 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
29 pub struct WriteLockId<'tcx> {
// NOTE(review): field declarations are not visible here, but construction
// sites below (`WriteLockId { .. path: lock_path.clone() }`) and the
// `id.frame != cur_frame` retain-filter show it carries at least a `frame`
// index and a `path: AbsPlace<'tcx>` — confirm against the full file.
35 use rustc::mir::interpret::Lock::*;
36 use rustc::mir::interpret::Lock;
// A `LockInfo` with no suspensions and no effective lock.
38 impl<'tcx> Default for LockInfo<'tcx> {
39 fn default() -> Self {
// NOTE(review): body not visible in this excerpt — presumably constructs the
// unlocked state (e.g. via `LockInfo::new(NoLock)`); confirm.
44 impl<'tcx> LockInfo<'tcx> {
// Fresh lock record: no suspensions yet, `lock` becomes the effective state.
45 fn new(lock: Lock) -> LockInfo<'tcx> {
47 suspended: HashMap::new(),
// Whether `frame` may perform `access` under the current effective lock.
// `frame == None` denotes "no frame": it can never match a write lock's
// owner (`Some(lft.frame) == None` is false), so a `None` caller is only
// admitted by states that permit the access unconditionally.
52 fn access_permitted(&self, frame: Option<usize>, access: AccessKind) -> bool {
53 use super::AccessKind::*;
54 match (&self.active, access) {
56 (&ReadLock(ref lfts), Read) => {
// A read lock must track at least one reader lifetime; an empty
// vector indicates a bookkeeping bug elsewhere.
57 assert!(!lfts.is_empty(), "Someone left an empty read lock behind.");
58 // Read access to read-locked region is okay, no matter who's holding the read lock.
61 (&WriteLock(ref lft), _) => {
62 // All access is okay if we are the ones holding it
63 Some(lft.frame) == frame
65 _ => false, // Nothing else is okay.
// Extension trait bundling the lock operations implemented on `Memory` below.
70 pub trait MemoryExt<'tcx> {
76 ) -> EvalResult<'tcx>;
81 region: Option<region::Scope>,
83 ) -> EvalResult<'tcx>;
/// Release or suspend a write lock prematurely. With `suspend == Some(scope)`
/// a suspension for that scope is registered instead of releasing.
84 fn suspend_write_lock(
88 lock_path: &AbsPlace<'tcx>,
89 suspend: Option<region::Scope>,
90 ) -> EvalResult<'tcx>;
/// Drop one suspension of `suspended_region`; when it was the last one (or
/// none existed), the write lock is acquired again for `lock_region`.
91 fn recover_write_lock(
95 lock_path: &AbsPlace<'tcx>,
96 lock_region: Option<region::Scope>,
97 suspended_region: region::Scope,
98 ) -> EvalResult<'tcx>;
/// Release every lock whose lifetime ends with `ending_region`;
/// `None` means the current frame is returning, ending all of its locks.
99 fn locks_lifetime_ended(&mut self, ending_region: Option<region::Scope>);
103 impl<'a, 'mir, 'tcx: 'mir + 'a> MemoryExt<'tcx> for Memory<'a, 'mir, 'tcx, Evaluator<'tcx>> {
// NOTE(review): the `fn` name line of this method is not visible in this
// excerpt; from the error kind produced it is the access-time lock check.
109 ) -> EvalResult<'tcx> {
// Allocations with no lock record are immutable statics or other constant
// memory — every access to them is permitted.
113 let locks = match self.data.locks.get(&ptr.alloc_id) {
114 Some(locks) => locks,
115 // immutable static or other constant memory
116 None => return Ok(()),
118 let frame = self.cur_frame;
// Validate the byte range `[ptr.offset, ptr.offset + len)` on behalf of the
// current frame; a conflicting lock is reported as a MemoryLockViolation.
120 .check(Some(frame), ptr.offset.bytes(), len, access)
122 EvalErrorKind::MemoryLockViolation {
132 /// Acquire the lock for the given lifetime
137 region: Option<region::Scope>,
139 ) -> EvalResult<'tcx> {
140 let frame = self.cur_frame;
143 "Frame {} acquiring {:?} lock at {:?}, size {} for region {:?}",
// Bounds-check only the end of the range: if `ptr.offset + len` is in
// bounds, the start is as well, because `offset` checks for overflow.
150 self.check_bounds(ptr.offset(Size::from_bytes(len), &*self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
152 let locks = match self.data.locks.get_mut(&ptr.alloc_id) {
153 Some(locks) => locks,
154 // immutable static or other constant memory
155 None => return Ok(()),
158 // Iterate over our range and acquire the lock. If the range is already split into pieces,
159 // we have to manipulate all of them.
160 let lifetime = DynamicLifetime { frame, region };
161 for lock in locks.iter_mut(ptr.offset.bytes(), len) {
// `None` frame: acquisition must not piggy-back on a write lock we already
// hold; any existing write lock (even our own) is a conflict here.
162 if !lock.access_permitted(None, kind) {
163 return err!(MemoryAcquireConflict {
167 lock: lock.active.clone(),
170 // See what we have to do
171 match (&mut lock.active, kind) {
172 (active @ &mut NoLock, AccessKind::Write) => {
173 *active = WriteLock(lifetime);
175 (active @ &mut NoLock, AccessKind::Read) => {
176 *active = ReadLock(vec![lifetime]),
178 (&mut ReadLock(ref mut lifetimes), AccessKind::Read) => {
// An existing read lock simply gains another reader lifetime.
179 lifetimes.push(lifetime);
181 _ => bug!("We already checked that there is no conflicting lock"),
187 /// Release or suspend a write lock of the given lifetime prematurely.
188 /// When releasing, if there is a read lock or someone else's write lock, that's an error.
189 /// If no lock is held, that's fine. This can happen when e.g. a local is initialized
190 /// from a constant, and then suspended.
191 /// When suspending, the same cases are fine; we just register an additional suspension.
192 fn suspend_write_lock(
196 lock_path: &AbsPlace<'tcx>,
197 suspend: Option<region::Scope>,
198 ) -> EvalResult<'tcx> {
200 let cur_frame = self.cur_frame;
// No lock record for the allocation means constant memory — nothing to do.
201 let locks = match self.data.locks.get_mut(&ptr.alloc_id) {
202 Some(locks) => locks,
203 // immutable static or other constant memory
204 None => return Ok(()),
207 'locks: for lock in locks.iter_mut(ptr.offset.bytes(), len) {
208 let is_our_lock = match lock.active {
210 // Double-check that we are holding the lock.
211 // (Due to subtyping, checking the region would not make any sense.)
212 lft.frame == cur_frame,
213 ReadLock(_) | NoLock => false,
// Our own write lock is always released here; with `suspend` set, a
// suspension is additionally recorded below so it can be recovered.
216 trace!("Releasing {:?}", lock.active);
218 lock.active = NoLock;
221 "Not touching {:?} as it is not our lock",
225 // Check if we want to register a suspension
226 if let Some(suspend_region) = suspend {
227 let lock_id = WriteLockId {
229 path: lock_path.clone(),
231 trace!("Adding suspension to {:?}", lock_id);
232 let mut new_suspension = false;
235 // Remember whether we added a new suspension or not
236 .or_insert_with(|| { new_suspension = true; Vec::new() })
237 .push(suspend_region);
238 // If the suspension is new, we should have owned this.
239 // If there already was a suspension, we should NOT have owned this.
// Invariant check: first suspension of a lock must come from its owner;
// later suspensions must come from non-owners (the lock is already gone).
240 if new_suspension == is_our_lock {
244 } else if !is_our_lock {
248 // If we get here, releasing this is an error except for NoLock.
249 if lock.active != NoLock {
250 return err!(InvalidMemoryLockRelease {
254 lock: lock.active.clone(),
262 /// Release a suspension from the write lock. If this is the last suspension or if there is no suspension, acquire the lock.
263 fn recover_write_lock(
267 lock_path: &AbsPlace<'tcx>,
268 lock_region: Option<region::Scope>,
269 suspended_region: region::Scope,
270 ) -> EvalResult<'tcx> {
272 let cur_frame = self.cur_frame,
// Identify the suspended lock by frame + abstract place (see WriteLockId docs).
273 let lock_id = WriteLockId {
275 path: lock_path.clone(),
277 let locks = match self.data.locks.get_mut(&ptr.alloc_id) {
278 Some(locks) => locks,
279 // immutable static or other constant memory
280 None => return Ok(()),
283 for lock in locks.iter_mut(ptr.offset.bytes(), len) {
284 // Check if we have a suspension here
285 let (got_the_lock, remove_suspension) = match lock.suspended.get_mut(&lock_id) {
287 trace!("No suspension around, we can just acquire");
290 Some(suspensions) => {
291 trace!("Found suspension of {:?}, removing it", lock_id);
292 // That's us! Remove suspension (it should be in there). The same suspension can
293 // occur multiple times (when there are multiple shared borrows of this that have the same
294 // lifetime); only remove one of them.
295 let idx = match suspensions.iter().enumerate().find(|&(_, re)| re == &suspended_region) {
296 None => // TODO: Can the user trigger this?
297 bug!("We have this lock suspended, but not for the given region."),
298 Some((idx, _)) => idx
300 suspensions.remove(idx);
// The lock comes back to us only when no suspensions remain.
301 let got_lock = suspensions.is_empty();
303 trace!("All suspensions are gone, we can have the lock again");
// Deferred map cleanup: the entry cannot be removed while `suspensions`
// is still mutably borrowed inside the match above.
308 if remove_suspension {
309 // with NLL, we could do that up in the match above...
310 assert!(got_the_lock);
311 lock.suspended.remove(&lock_id);
315 ref mut active @ NoLock => {
// Someone else holds a conflicting lock — we cannot re-acquire.
324 return err!(MemoryAcquireConflict {
327 kind: AccessKind::Write,
328 lock: lock.active.clone(),
// End-of-lifetime sweep: drop every lock (and, on frame return, every
// suspension) belonging to the current frame whose region just ended.
338 fn locks_lifetime_ended(&mut self, ending_region: Option<region::Scope>) {
339 let cur_frame = self.cur_frame;
341 "Releasing frame {} locks that expire at {:?}",
// A lifetime ends now iff it belongs to the current frame and either the
// whole frame ends (`ending_region == None`) or its region matches.
345 let has_ended = |lifetime: &DynamicLifetime| -> bool {
346 if lifetime.frame != cur_frame {
349 match ending_region {
350 None => true, // When a function ends, we end *all* its locks. It's okay for a function to still have lifetime-related locks
351 // when it returns, that can happen e.g. with NLL when a lifetime can, but does not have to, extend beyond the
352 // end of a function. Same for a function still having recoveries.
353 Some(ending_region) => lifetime.region == Some(ending_region),
357 for alloc_locks in self.data.locks.values_mut() {
358 for lock in alloc_locks.iter_mut_all() {
359 // Delete everything that ends now -- i.e., keep only all the other lifetimes.
360 let lock_ended = match lock.active {
361 WriteLock(ref lft) => has_ended(lft),
362 ReadLock(ref mut lfts) => {
// A read lock survives as long as at least one reader lifetime does.
363 lfts.retain(|lft| !has_ended(lft));
369 lock.active = NoLock;
371 // Also clean up suspended write locks when the function returns
372 if ending_region.is_none() {
373 lock.suspended.retain(|id, _suspensions| id.frame != cur_frame);
// Compact the map: entries that carry neither an active lock nor any
// suspension are dead weight and are dropped here.
377 alloc_locks.retain(|lock| match lock.active {
378 NoLock => !lock.suspended.is_empty(),
// Range-level helper: verify that `access` by `frame` is permitted by every
// lock overlapping the given byte range.
385 impl<'tcx> RangeMap<LockInfo<'tcx>> {
388 frame: Option<usize>,
392 ) -> Result<(), LockInfo<'tcx>> {
396 for lock in self.iter(offset, len) {
397 // Check if the lock is in conflict with the access.
398 if !lock.access_permitted(frame, access) {
// Hand the offending lock back to the caller (cloned — we only hold a
// shared borrow of the map here) so it can build a precise error.
399 return Err(lock.clone());