1 // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
13 // Code relating to drop glue.
18 use llvm::{ValueRef, get_param};
19 use middle::lang_items::ExchangeFreeFnLangItem;
20 use rustc::ty::subst::{Substs};
22 use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
24 use adt::GetDtorType; // for tcx.dtor_type()
27 use callee::{Callee, ArgVals};
29 use cleanup::CleanupMethods;
32 use debuginfo::DebugLoc;
36 use trans_item::TransItem;
37 use type_of::{type_of, sizing_type_of, align_of};
41 use arena::TypedArena;
42 use syntax_pos::DUMMY_SP;
// Emit a call to the `exchange_free` lang item to release a heap (`Box`)
// allocation whose size and alignment are runtime `ValueRef`s.
// NOTE(review): the parameter list and closing brace are elided in this
// view; the body references `v` (pointer to free), `size`, `align`, and
// `debug_loc`, which presumably are parameters — confirm against full file.
44 pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
49 -> Block<'blk, 'tcx> {
50 let _icx = push_ctxt("trans_exchange_free");
// Resolve the `exchange_free` lang item to its defining `DefId`.
52 let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem);
// `exchange_free` takes an untyped `i8*`, so cast the pointer first.
53 let args = [PointerCast(bcx, v, Type::i8p(bcx.ccx())), size, align];
// Call with empty substs (the lang item is not generic over the freed type)
// and continue translation in the block the call returns.
54 Callee::def(bcx.ccx(), def_id, bcx.tcx().mk_substs(Substs::empty()))
55 .call(bcx, debug_loc, ArgVals(&args), None).bcx
// Convenience wrapper over `trans_exchange_free_dyn` for the common case
// where size and alignment are compile-time constants: they are lowered to
// LLVM constant uints here.
// NOTE(review): parameters besides `cx` (including `size`, `align`, and the
// pointer argument) are elided in this view — confirm against full file.
58 pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
63 -> Block<'blk, 'tcx> {
64 trans_exchange_free_dyn(cx,
66 C_uint(cx.ccx(), size),
67 C_uint(cx.ccx(), align),
// Free a `Box` allocation given the *content* type rather than explicit
// size/align: size and alignment are computed from `content_ty`'s LLVM
// sizing type. Precondition (asserted): `content_ty` must be sized.
// NOTE(review): parameters `ptr`, `content_ty`, `debug_loc` and the closing
// brace are elided in this view — confirm against full file.
71 pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
75 -> Block<'blk, 'tcx> {
76 assert!(type_is_sized(bcx.ccx().tcx(), content_ty));
77 let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
78 let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
// Zero-sized contents never allocated in the first place, so emit no free.
80 // `Box<ZeroSizeType>` does not allocate.
81 if content_size != 0 {
82 let content_align = align_of(bcx.ccx(), content_ty);
83 trans_exchange_free(bcx, ptr, content_size, content_align, debug_loc)
// Whether `ty` needs drop glue at all, judged in the empty parameter
// environment (i.e. with no caller-supplied where-clauses in scope).
89 pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
90 ty: Ty<'tcx>) -> bool {
91 tcx.type_needs_drop_given_env(ty, &tcx.empty_parameter_environment())
// Map a type to the canonical type its drop glue is keyed/instantiated on.
// This lets many types share one glue function (e.g. types with no drop
// obligations can share trivial glue). Input must already be normalized.
// NOTE(review): several interior lines (including the returns and the start
// of the `match`) are elided in this view — the visible logic is:
//   * unsized types keep their own glue (vtable pointer may be needed);
//   * types not needing drop presumably collapse to a trivial glue type;
//   * `Box<T>` of a no-drop, sized, zero-size `T` allocates nothing, so it
//     presumably also gets trivial glue. Confirm against full file.
94 pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
95 t: Ty<'tcx>) -> Ty<'tcx> {
96 assert!(t.is_normalized_for_trans());
98 // Even if there is no dtor for t, there might be one deeper down and we
99 // might need to pass in the vtable ptr.
100 if !type_is_sized(tcx, t) {
104 // FIXME (#22815): note that type_needs_drop conservatively
105 // approximates in some cases and may say a type expression
106 // requires drop glue when it actually does not.
108 // (In this case it is not clear whether any harm is done, i.e.
109 // erroneously returning `t` in some cases where we could have
110 // returned `tcx.types.i8` does not appear unsound. The impact on
111 // code quality is unknown at this time.)
113 if !type_needs_drop(tcx, t) {
117 ty::TyBox(typ) if !type_needs_drop(tcx, typ)
118 && type_is_sized(tcx, typ) => {
// Use a fresh normalizing inference context to compute the content layout.
119 tcx.normalizing_infer_ctxt(traits::ProjectionMode::Any).enter(|infcx| {
120 let layout = t.layout(&infcx).unwrap();
121 if layout.size(&tcx.data_layout).bytes() == 0 {
122 // `Box<ZeroSizeType>` does not allocate.
// Drop the value *aliased* by `v` of type `t`. Thin wrapper over
// `drop_ty_core` with `skip_dtor = false` and no drop hint.
// NOTE(review): parameters `v` and `t` are on elided lines in this view.
133 pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
136 debug_loc: DebugLoc) -> Block<'blk, 'tcx> {
137 drop_ty_core(bcx, v, t, debug_loc, false, None)
// Core drop emission: if `t` needs drop, fetch (or reuse) its glue function
// and call it on a pointer to the value. `skip_dtor` selects the
// `TyContents` glue (contents only, shallow `Drop` impl skipped); an
// optional `drop_hint` guards the call behind a runtime "was it moved?"
// check. NOTE(review): interior lines (the `else` arm of `skip_dtor`, the
// hint branch header, and the closing braces) are elided in this view.
140 pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
145 drop_hint: Option<cleanup::DropHintValue>)
146 -> Block<'blk, 'tcx> {
147 // NB: v is an *alias* of type t here, not a direct value.
148 debug!("drop_ty_core(t={:?}, skip_dtor={} drop_hint={:?})", t, skip_dtor, drop_hint);
149 let _icx = push_ctxt("drop_ty");
// Emit nothing at all for types with no drop obligations.
151 if bcx.fcx.type_needs_drop(t) {
153 let g = if skip_dtor {
154 DropGlueKind::TyContents(t)
158 let glue = get_drop_glue_core(ccx, g);
// Glue is instantiated on the canonical glue type; if that differs from
// `t`, cast the value pointer to the glue's expected parameter type.
159 let glue_type = get_drop_glue_type(ccx.tcx(), t);
160 let ptr = if glue_type != t {
161 PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to())
// With a drop hint: load the hint byte and only call glue when it does not
// equal the "moved" marker, i.e. the value may still need dropping.
168 let hint_val = load_ty(bcx, drop_hint.value(), bcx.tcx().types.u8);
170 C_integral(Type::i8(bcx.ccx()), adt::DTOR_MOVED_HINT as u64, false);
172 ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None);
173 bcx = with_cond(bcx, may_need_drop, |cx| {
174 Call(cx, glue, &[ptr], debug_loc);
179 // No drop-hint ==> call standard drop glue
180 Call(bcx, glue, &[ptr], debug_loc);
// Drop a value held *by value* (an immediate) rather than behind a pointer:
// spill it into a fresh stack slot (with lifetime markers), run the normal
// by-alias drop path on the slot, then end the slot's lifetime.
// NOTE(review): parameters `v`, `t`, `debug_loc`, `skip_dtor` and the
// closing brace are elided in this view — confirm against full file.
187 pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
192 -> Block<'blk, 'tcx> {
193 let _icx = push_ctxt("drop_ty_immediate");
194 let vp = alloc_ty(bcx, t, "");
195 call_lifetime_start(bcx, vp);
196 store_ty(bcx, v, vp, t);
197 let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None);
198 call_lifetime_end(bcx, vp);
// Public entry point: fetch the (already-instantiated) drop glue function
// for the normal `Ty` kind — dtor plus contents.
202 pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef {
203 get_drop_glue_core(ccx, DropGlueKind::Ty(t))
// Which flavor of drop glue to generate/look up for a type. Used as the
// key in the crate context's drop-glue cache.
206 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
207 pub enum DropGlueKind<'tcx> {
208 /// The normal path; runs the dtor, and then recurs on the contents
210 /// Skips the dtor, if any, for ty; drops the contents directly.
211 /// Note that the dtor is only skipped at the most *shallow*
212 /// level, namely, an `impl Drop for Ty` itself. So, for example,
213 /// if Ty is Newtype(S) then only the Drop impl for Newtype itself
214 /// will be skipped, while the Drop impl for S, if any, will be
216 TyContents(Ty<'tcx>),
219 impl<'tcx> DropGlueKind<'tcx> {
/// Extract the underlying type, regardless of glue kind.
220 pub fn ty(&self) -> Ty<'tcx> {
221 match *self { DropGlueKind::Ty(t) | DropGlueKind::TyContents(t) => t }
/// Apply `f` to the underlying type while preserving the glue kind
/// (used e.g. to canonicalize the type before cache lookup).
224 pub fn map_ty<F>(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx>
227 DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)),
228 DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)),
// Look up the pre-declared drop-glue function for `g` in the crate
// context's cache. Glue declarations are created in an earlier
// pre-definition phase (the collector), so a miss here is a compiler bug.
233 fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
234 g: DropGlueKind<'tcx>) -> ValueRef {
// Canonicalize the key the same way the pre-definition phase did.
235 let g = g.map_ty(|t| get_drop_glue_type(ccx.tcx(), t));
236 match ccx.drop_glues().borrow().get(&g) {
237 Some(&(glue, _)) => glue,
238 None => { bug!("Could not find drop glue for {:?} -- {} -- {}. \
239 It should have be instantiated during the pre-definition phase",
241 TransItem::DropGlue(g).to_raw_string(),
242 ccx.codegen_unit().name) }
// Fill in the body of an already-declared drop-glue function: set up a
// fresh `FunctionContext`, emit the glue via `make_drop_glue` on the single
// by-alias parameter, and finish the function.
// Precondition (asserted): `g`'s type is already canonical per
// `get_drop_glue_type`.
246 pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
247 g: DropGlueKind<'tcx>) {
249 assert_eq!(g.ty(), get_drop_glue_type(tcx, g.ty()));
// The declaration (llfn + fn type) was created in the pre-definition phase.
250 let (llfn, fn_ty) = ccx.drop_glues().borrow().get(&g).unwrap().clone();
// The arena must outlive the FunctionContext that borrows from it.
252 let (arena, fcx): (TypedArena<_>, FunctionContext);
253 arena = TypedArena::new();
254 fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &arena);
256 let bcx = fcx.init(false, None);
// Bookkeeping: count glue functions for -Z trans stats.
258 ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
259 // All glue functions take values passed *by alias*; this is a
260 // requirement since in many contexts glue is invoked indirectly and
261 // the caller has no idea if it's dealing with something that can be
264 // llfn is expected be declared to take a parameter of the appropriate
265 // type, so we don't need to explicitly cast the function parameter.
267 let bcx = make_drop_glue(bcx, get_param(llfn, 0), g);
268 fcx.finish(bcx, DebugLoc::None);
// Drop a struct that carries an embedded drop flag: load the flag and only
// run the real destructor when the flag still says DTOR_NEEDED. With
// `-Z force-dropflag-checks` sanity checking enabled, additionally trap
// (llvm.debugtrap) if the flag is neither "needed" nor "done" — i.e. the
// drop-flag byte has been corrupted.
// NOTE(review): parameter `t` is on an elided line; body references it.
272 fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
274 struct_data: ValueRef)
275 -> Block<'blk, 'tcx> {
276 assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized");
278 let repr = adt::represent_type(bcx.ccx(), t);
// Locate and load the drop-flag field inside the struct layout.
279 let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &repr, struct_data));
280 let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type());
281 let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
282 let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false);
// Sanity-check path (conditionally compiled in at runtime-flag level):
// trap when the flag is neither initialized nor cleared.
284 let bcx = if !bcx.ccx().check_drop_flag_for_sanity() {
287 let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
288 let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false);
289 let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None);
290 let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None);
291 let drop_flag_neither_initialized_nor_cleared =
292 And(bcx, not_init, not_done, DebugLoc::None);
293 with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| {
294 let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap"));
295 Call(cx, llfn, &[], DebugLoc::None);
// Normal path: run the destructor only when the flag equals DTOR_NEEDED.
300 let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None);
301 with_cond(bcx, drop_flag_dtor_needed, |cx| {
302 trans_struct_drop(cx, t, struct_data)
// Unconditionally run a struct/enum's `Drop::drop` impl, then its field
// destructors. The contents cleanup is scheduled in a custom cleanup scope
// *before* invoking the user dtor, so fields are still dropped if the user
// dtor panics (see comments below). Sized values are passed directly;
// unsized values are passed as (data ptr, metadata) pairs loaded from the
// fat pointer. NOTE(review): parameter `v0` and the sized-args lines are
// elided in this view; body references `v0`, `tcx`, `sized_args`.
305 fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
310 debug!("trans_struct_drop t: {}", t);
314 let def = t.ty_adt_def().unwrap();
316 // Be sure to put the contents into a scope so we can use an invoke
317 // instruction to call the user destructor but still call the field
318 // destructors if the user destructor panics.
320 // FIXME (#14875) panic-in-drop semantics might be unsupported; we
321 // might well consider changing below to more direct code.
322 let contents_scope = bcx.fcx.push_custom_cleanup_scope();
324 // Issue #23611: schedule cleanup of contents, re-inspecting the
325 // discriminant (if any) in case of variant swap in drop code.
326 bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t);
328 let (sized_args, unsized_args);
329 let args: &[ValueRef] = if type_is_sized(tcx, t) {
333 unsized_args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_meta(bcx, v0))];
// Resolve which impl provides `Drop` for `t` via trait selection; glue is
// only built for concrete types, so this must be a VtableImpl.
337 let trait_ref = ty::Binder(ty::TraitRef {
338 def_id: tcx.lang_items.drop_trait().unwrap(),
339 substs: tcx.mk_substs(Substs::empty().with_self_ty(t))
341 let vtbl = match fulfill_obligation(bcx.ccx().shared(), DUMMY_SP, trait_ref) {
342 traits::VtableImpl(data) => data,
343 _ => bug!("dtor for {:?} is not an impl???", t)
345 let dtor_did = def.destructor().unwrap();
346 bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs)
347 .call(bcx, DebugLoc::None, ArgVals(args), None).bcx;
// Popping the scope emits the scheduled field drops after the user dtor.
349 bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
// Compute the runtime (size, align) of a possibly-unsized value of type
// `t`, given `info` — the fat-pointer metadata (vtable ptr for trait
// objects, element count for slices/str; unused for sized `t`).
// Sized types resolve to constants; DSTs recurse on the tail field and
// combine with the statically-known prefix.
// NOTE(review): several lines (match header, some closing braces, the
// struct arm's sized-args line) are elided in this view.
352 pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
353 t: Ty<'tcx>, info: ValueRef)
354 -> (ValueRef, ValueRef) {
355 debug!("calculate size of DST: {}; with lost info: {:?}",
// Fast path: fully sized types have statically-known size and alignment.
357 if type_is_sized(bcx.tcx(), t) {
358 let sizing_type = sizing_type_of(bcx.ccx(), t);
359 let size = llsize_of_alloc(bcx.ccx(), sizing_type);
360 let align = align_of(bcx.ccx(), t);
361 debug!("size_and_align_of_dst t={} info={:?} size: {} align: {}",
362 t, Value(info), size, align);
363 let size = C_uint(bcx.ccx(), size);
364 let align = C_uint(bcx.ccx(), align);
365 return (size, align);
// In unreachable code, emit undefs rather than loading bogus metadata.
367 if bcx.is_unreachable() {
368 let llty = Type::int(bcx.ccx());
369 return (C_undef(llty), C_undef(llty));
// Struct whose last field is unsized: static prefix + dynamic tail.
372 ty::TyStruct(def, substs) => {
374 // First get the size of all statically known fields.
375 // Don't use type_of::sizing_type_of because that expects t to be sized.
376 assert!(!t.is_simd());
377 let repr = adt::represent_type(ccx, t);
378 let sizing_type = adt::sizing_type_context_of(ccx, &repr, true);
379 debug!("DST {} sizing_type: {:?}", t, sizing_type);
380 let sized_size = llsize_of_alloc(ccx, sizing_type.prefix());
381 let sized_align = llalign_of_min(ccx, sizing_type.prefix());
382 debug!("DST {} statically sized prefix size: {} align: {}",
383 t, sized_size, sized_align);
384 let sized_size = C_uint(ccx, sized_size);
385 let sized_align = C_uint(ccx, sized_align);
387 // Recurse to get the size of the dynamically sized field (must be
389 let last_field = def.struct_variant().fields.last().unwrap();
390 let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field);
391 let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
393 // FIXME (#26403, #27023): We should be adding padding
394 // to `sized_size` (to accommodate the `unsized_align`
395 // required of the unsized field that follows) before
396 // summing it with `sized_size`. (Note that since #26403
397 // is unfixed, we do not yet add the necessary padding
398 // here. But this is where the add would go.)
400 // Return the sum of sizes and max of aligns.
401 let mut size = bcx.add(sized_size, unsized_size);
403 // Issue #27023: If there is a drop flag, *now* we add 1
404 // to the size. (We can do this without adding any
405 // padding because drop flags do not have any alignment
407 if sizing_type.needs_drop_flag() {
408 size = bcx.add(size, C_uint(bcx.ccx(), 1_u64));
411 // Choose max of two known alignments (combined value must
412 // be aligned according to more restrictive of the two).
413 let align = match (const_to_opt_uint(sized_align), const_to_opt_uint(unsized_align)) {
414 (Some(sized_align), Some(unsized_align)) => {
415 // If both alignments are constant, (the sized_align should always be), then
416 // pick the correct alignment statically.
417 C_uint(ccx, std::cmp::max(sized_align, unsized_align))
// Otherwise select the max at runtime with icmp+select.
419 _ => bcx.select(bcx.icmp(llvm::IntUGT, sized_align, unsized_align),
424 // Issue #27023: must add any necessary padding to `size`
425 // (to make it a multiple of `align`) before returning it.
427 // Namely, the returned size should be, in C notation:
429 // `size + ((size & (align-1)) ? align : 0)`
431 // emulated via the semi-standard fast bit trick:
433 // `(size + (align-1)) & -align`
435 let addend = bcx.sub(align, C_uint(bcx.ccx(), 1_u64));
436 let size = bcx.and(bcx.add(size, addend), bcx.neg(align));
// Trait object: size/align live in the vtable at fixed slots 1 and 2
// (slot 0 is the destructor pointer).
441 // info points to the vtable and the second entry in the vtable is the
442 // dynamic size of the object.
443 let info = bcx.pointercast(info, Type::int(bcx.ccx()).ptr_to());
444 let size_ptr = bcx.gepi(info, &[1]);
445 let align_ptr = bcx.gepi(info, &[2]);
446 (bcx.load(size_ptr), bcx.load(align_ptr))
// Slice/str: metadata is the element count; size = count * elem size.
448 ty::TySlice(_) | ty::TyStr => {
449 let unit_ty = t.sequence_element_type(bcx.tcx());
450 // The info in this case is the length of the str, so the size is that
451 // times the unit size.
452 let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
453 let unit_align = llalign_of_min(bcx.ccx(), llunit_ty);
454 let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty);
455 (bcx.mul(info, C_uint(bcx.ccx(), unit_size)),
456 C_uint(bcx.ccx(), unit_align))
458 _ => bug!("Unexpected unsized type, found {}", t)
// The heart of glue generation: emit the body of a drop-glue function for
// `g` operating on the by-alias value `v0`. Dispatches on the type:
//   * TyBox — drop contents, then free the allocation (with a check against
//     the "already dropped" pointer bit-pattern, and a zero-size check);
//   * TyStruct/TyEnum with a dtor — drop-flag-guarded or direct struct drop;
//   * trait objects — call the destructor slot from the vtable;
//   * everything else — structural recursion over fields.
// NOTE(review): many interior lines and the end of the function are elided
// in this view (the chunk ends mid-body); body references `t`, elided here.
462 fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueKind<'tcx>)
463 -> Block<'blk, 'tcx> {
// When -Z print-trans-items debugging is on, record that this glue was
// actually generated (vs merely predicted by the collector).
464 if collector::collecting_debug_information(bcx.ccx().shared()) {
466 .record_translation_item_as_generated(TransItem::DropGlue(g));
471 let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true };
472 // NB: v0 is an *alias* of type t here, not a direct value.
473 let _icx = push_ctxt("make_drop_glue");
475 // Only drop the value when it ... well, we used to check for
476 // non-null, (and maybe we need to continue doing so), but we now
477 // must definitely check for special bit-patterns corresponding to
478 // the special dtor markings.
// Pointer-sized constant for the "already dropped" marker pattern.
480 let inttype = Type::int(bcx.ccx());
481 let dropped_pattern = C_integral(inttype, adt::DTOR_DONE_U64, false);
484 ty::TyBox(content_ty) => {
485 // Support for TyBox is built-in and its drop glue is
486 // special. It may move to library and have Drop impl. As
487 // a safe-guard, assert TyBox not used with TyContents.
// Unsized box: v0 is a fat pointer; free with dynamically computed
// size/align, skipping the free entirely when the size is zero.
489 if !type_is_sized(bcx.tcx(), content_ty) {
490 let llval = expr::get_dataptr(bcx, v0);
491 let llbox = Load(bcx, llval);
492 let llbox_as_usize = PtrToInt(bcx, llbox, Type::int(bcx.ccx()));
493 let drop_flag_not_dropped_already =
494 ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
495 with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
496 let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
497 let info = expr::get_meta(bcx, v0);
498 let info = Load(bcx, info);
499 let (llsize, llalign) =
500 size_and_align_of_dst(&bcx.build(), content_ty, info);
502 // `Box<ZeroSizeType>` does not allocate.
503 let needs_free = ICmp(bcx,
506 C_uint(bcx.ccx(), 0u64),
508 with_cond(bcx, needs_free, |bcx| {
509 trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
// Sized box: thin pointer; drop contents then free by content type.
514 let llbox = Load(bcx, llval);
515 let llbox_as_usize = PtrToInt(bcx, llbox, inttype);
516 let drop_flag_not_dropped_already =
517 ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
518 with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
519 let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
520 trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
// ADTs: dispatch on destructor kind and whether the dtor is skipped.
524 ty::TyStruct(def, _) | ty::TyEnum(def, _) => {
525 match (def.dtor_kind(), skip_dtor) {
526 (ty::TraitDtor(true), false) => {
527 // FIXME(16758) Since the struct is unsized, it is hard to
528 // find the drop flag (which is at the end of the struct).
529 // Lets just ignore the flag and pretend everything will be
531 if type_is_sized(bcx.tcx(), t) {
532 trans_struct_drop_flag(bcx, t, v0)
534 // Give the user a heads up that we are doing something
535 // stupid and dangerous.
536 bcx.sess().warn(&format!("Ignoring drop flag in destructor for {} \
537 because the struct is unsized. See issue \
539 trans_struct_drop(bcx, t, v0)
// Dtor without a drop flag: run it unconditionally.
542 (ty::TraitDtor(false), false) => {
543 trans_struct_drop(bcx, t, v0)
545 (ty::NoDtor, _) | (_, true) => {
546 // No dtor? Just the default case
547 iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
// Trait object: the destructor is slot 0 of the vtable; call it on the
// data pointer (cast to i8*).
552 // No support in vtable for distinguishing destroying with
553 // versus without calling Drop::drop. Assert caller is
554 // okay with always calling the Drop impl, if any.
556 let data_ptr = expr::get_dataptr(bcx, v0);
557 let vtable_ptr = Load(bcx, expr::get_meta(bcx, v0));
558 let dtor = Load(bcx, vtable_ptr);
561 &[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))],
// Fallback: recurse structurally over the components of `t`.
566 if bcx.fcx.type_needs_drop(t) {
567 iter_structural_ty(bcx,
570 |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))