1 // A "shape" is a compact encoding of a type that is used by interpreted glue.
2 // This substitutes for the runtime tags used by e.g. MLs.
4 import lib::llvm::True;
5 import lib::llvm::llvm::{ModuleRef, TypeRef, ValueRef};
6 import driver::session;
7 import middle::{trans, trans_common};
8 import middle::trans_common::{crate_ctxt, val_ty, C_bytes,
9 C_named_struct, C_struct, T_tag_variant};
11 import middle::ty::field;
13 import syntax::ast_util::dummy_sp;
14 import syntax::util::interner;
17 import core::{vec, str};
18 import std::map::hashmap;
19 import option::{none, some};
21 import ty_ctxt = middle::ty::ctxt;
// Key identifying a monomorphized resource: the resource item's def_id plus
// the (already substituted) content type. Interned so each distinct resource
// instantiation gets one stable shape-table index.
type res_info = {did: ast::def_id, t: ty::t};
// NOTE(review): the `type ctxt =` header for this record is elided from this
// chunk; these are the fields of the shape context built by mk_ctxt() below.
{mutable next_tag_id: u16,                    // next table index to assign to a tag
 tag_id_to_index: hashmap<ast::def_id, u16>,  // tag def_id -> assigned index
 mutable tag_order: [ast::def_id],            // tags in index-assignment order
 resources: interner::interner<res_info>,     // interned resource instantiations
 llshapetablesty: TypeRef,                    // LLVM struct type of the shape tables
 llshapetables: ValueRef};                    // LLVM global holding the tables
// Shape opcodes: one byte each, written into the shape byte streams that the
// runtime glue interprets (see shape_of below for where each is emitted).
const shape_u8: u8 = 0u8;
const shape_u16: u8 = 1u8;
const shape_u32: u8 = 2u8;
const shape_u64: u8 = 3u8;
const shape_i8: u8 = 4u8;
const shape_i16: u8 = 5u8;
const shape_i32: u8 = 6u8;
const shape_i64: u8 = 7u8;
const shape_f32: u8 = 8u8;
const shape_f64: u8 = 9u8;
// (10 is currently unused, was evec)
const shape_vec: u8 = 11u8;
const shape_tag: u8 = 12u8;
const shape_box: u8 = 13u8;
// (14-16 are not defined in this chunk; gap preserved deliberately.)
const shape_struct: u8 = 17u8;
const shape_fn: u8 = 18u8;
const shape_obj: u8 = 19u8;
const shape_res: u8 = 20u8;
const shape_var: u8 = 21u8;          // reference to a type parameter, by index
const shape_uniq: u8 = 22u8;
const shape_opaque_closure_ptr: u8 = 23u8; // the closure itself.
// FIXME: This is a bad API in trans_common.
// Thin wrapper: widens the byte to uint before delegating, since
// trans_common::C_u8 takes a uint.
fn C_u8(n: u8) -> ValueRef { ret trans_common::C_u8(n as uint); }
// Hash function used when interning res_info values: folds the def_id's
// crate and node numbers into the accumulator.
// NOTE(review): the initializer of `h`, any hashing of `ri.t`, and the final
// `ret` are elided from this chunk — confirm against the full source.
fn hash_res_info(ri: res_info) -> uint {
    h += ri.did.crate as uint;
    h += ri.did.node as uint;
// Equality predicate paired with hash_res_info for the resource interner:
// two res_infos are equal iff both def_id components and the type match.
// (Closing brace elided from this chunk.)
fn eq_res_info(a: res_info, b: res_info) -> bool {
    ret a.did.crate == b.did.crate && a.did.node == b.did.node && a.t == b.t;
// Creates an LLVM global named `name` in the crate's module, initialized to
// the constant `llval`; marks it constant and, when `internal` is true, gives
// it internal linkage so it is not exported from the object file.
// NOTE(review): the return type, the name-buffer plumbing around LLVMAddGlobal,
// the `if internal` guard, and the final `ret` are elided from this chunk.
fn mk_global(ccx: @crate_ctxt, name: str, llval: ValueRef, internal: bool) ->
        lib::llvm::llvm::LLVMAddGlobal(ccx.llmod,
    lib::llvm::llvm::LLVMSetInitializer(llglobal, llval);
    lib::llvm::llvm::LLVMSetGlobalConstant(llglobal, True);
        lib::llvm::llvm::LLVMSetLinkage(llglobal,
                                        lib::llvm::LLVMInternalLinkage as
                                            lib::llvm::llvm::Linkage);
95 // Computes a set of variants of a tag that are guaranteed to have size and
96 // alignment at least as large as any other variant of the tag. This is an
97 // important performance optimization.
99 // TODO: Use this in dynamic_size_of() as well.
// Returns the indices of the variants of `tag_id` that are candidates for
// being its largest variant (no other variant is provably at least as large
// and at least as aligned). Used to size tags without mk_tup().
// NOTE(review): several lines of this function (the `ranges` accumulator,
// loop-counter initializers, and closing braces) are elided from this chunk;
// only the visible tokens are edited here.
fn largest_variants(ccx: @crate_ctxt, tag_id: ast::def_id) -> [uint] {
    // Compute the minimum and maximum size and alignment for each variant.
    // TODO: We could do better here; e.g. we know that any variant that
    // contains (T,T) must be at least as large as any variant that contains
    let variants = ty::tag_variants(ccx.tcx, tag_id);
    for variant: ty::variant_info in *variants {
        // Lower bounds on this variant's size/alignment; an argument type
        // that mentions type parameters contributes 0 (conservative).
        let {a: min_size, b: min_align} = {a: 0u, b: 0u};
        for elem_t: ty::t in variant.args {
            if ty::type_contains_params(ccx.tcx, elem_t) {
                // TODO: We could do better here; this causes us to
                // conservatively assume that (int, T) has minimum size 0,
                // when in fact it has minimum size sizeof(int).
                // Could avoid this check: the constraint should
                // follow from how elem_t doesn't contain params.
                // (Could add a postcondition to type_contains_params,
                // once we implement Issue #586.)
                check (trans_common::type_has_static_size(ccx, elem_t));
                let llty = trans::type_of(ccx, dummy_sp(), elem_t);
                min_size += trans::llsize_of_real(ccx, llty);
                min_align += trans::llalign_of_real(ccx, llty);
        [{size: {min: min_size, bounded: bounded},
          align: {min: min_align, bounded: bounded}}];
    // Initialize the candidate set to contain all variants.
    let candidates = [mutable];
    for variant in *variants { candidates += [mutable true]; }
    // Do a pairwise comparison among all variants still in the candidate set.
    // Throw out any variant that we know has size and alignment at least as
    // small as some other variant.
    while i < vec::len(ranges) - 1u {
        while j < vec::len(ranges) {
            if ranges[i].size.bounded && ranges[i].align.bounded &&
               ranges[j].size.bounded && ranges[j].align.bounded {
                if ranges[i].size >= ranges[j].size &&
                   ranges[i].align >= ranges[j].align {
                    // i dominates j: j cannot be the largest variant.
                    candidates[j] = false;
                } else if ranges[j].size >= ranges[i].size &&
                          // BUG FIX: was `ranges[j].align >= ranges[j].align`
                          // — a self-comparison that is always true, so i
                          // could be discarded even when j's alignment bound
                          // is smaller. Mirror of the branch above.
                          ranges[j].align >= ranges[i].align {
                    candidates[i] = false;
    // Return the resulting set.
    while i < vec::len(candidates) {
        if candidates[i] { result += [i]; }
177 // Computes the static size of a tag, without using mk_tup(), which is
178 // bad for performance.
180 // TODO: Migrate trans over to use this.
// Rounds `size` up to the next multiple of `align`. Binary `&` binds looser
// than `+`/`-`, so the expression computes the standard mask trick:
// (size + alignment - 1u16) & !(alignment - 1u16). The mask form is only
// valid when `align` is a power of two, though only align >= 1 is asserted.
// (Closing brace elided from this chunk.)
fn round_up(size: u16, align: u8) -> u16 {
    assert (align >= 1u8);
    let alignment = align as u16;
    ret size - 1u16 + alignment & !(alignment - 1u16);
// A static size/alignment pair for a tag, as computed by
// compute_static_tag_size below and written into the tag info table.
type size_align = {size: u16, align: u8};
// Computes the static size and alignment of the tag `did` by laying each of
// its candidate largest variants out as an LLVM struct and taking the maxima,
// then adding room for the discriminant when there is more than one variant.
// NOTE(review): initializers for `lltys`, `max_size`, `max_align` and several
// closing braces are elided from this chunk.
fn compute_static_tag_size(ccx: @crate_ctxt, largest_variants: [uint],
                           did: ast::def_id) -> size_align {
    let variants = ty::tag_variants(ccx.tcx, did);
    for vid: uint in largest_variants {
        // We increment a "virtual data pointer" to compute the size.
        for typ: ty::t in variants[vid].args {
            // FIXME: there should really be a postcondition
            // on tag_variants that would obviate the need for
            // this check. (Issue #586)
            check (trans_common::type_has_static_size(ccx, typ));
            lltys += [trans::type_of(ccx, dummy_sp(), typ)];
        // Lay the variant's fields out as a struct and measure it.
        let llty = trans_common::T_struct(lltys);
        let dp = trans::llsize_of_real(ccx, llty) as u16;
        let variant_align = trans::llalign_of_real(ccx, llty) as u8;
        if max_size < dp { max_size = dp; }
        if max_align < variant_align { max_align = variant_align; }
    // Add space for the tag if applicable.
    // FIXME (issue #792): This is wrong. If the tag starts with an 8 byte
    // aligned quantity, we don't align it.
    if vec::len(*variants) > 1u {
        let variant_t = T_tag_variant(ccx);
        max_size += trans::llsize_of_real(ccx, variant_t) as u16;
        let align = trans::llalign_of_real(ccx, variant_t) as u8;
        if max_align < align { max_align = align; }
    ret {size: max_size, align: max_align};
// Shape classification of a tag (see the tag_kind fn below): tk_unit for a
// single nullary variant, tk_enum for multiple nullary variants (C-like
// enum), tk_complex for anything else (argument-carrying or zero variants).
tag tag_kind { tk_unit; tk_enum; tk_complex; }
// Classifies the tag `did`: complex if it has no variants or any variant
// carries arguments; unit if exactly one (nullary) variant.
// NOTE(review): the trailing `ret tk_enum;` case (all-nullary, multi-variant)
// and the closing braces are elided from this chunk.
fn tag_kind(ccx: @crate_ctxt, did: ast::def_id) -> tag_kind {
    let variants = ty::tag_variants(ccx.tcx, did);
    if vec::len(*variants) == 0u { ret tk_complex; }
    for v: ty::variant_info in *variants {
        if vec::len(v.args) > 0u { ret tk_complex; }
    if vec::len(*variants) == 1u { ret tk_unit; }
// Returns the code corresponding to the pointer size on this architecture.
// Signed-int shape code for the target's native `int`: 32-bit on x86/ARM,
// 64-bit on x86-64. (Closing braces of the alt/fn elided from this chunk.)
fn s_int(tcx: ty_ctxt) -> u8 {
    ret alt tcx.sess.get_targ_cfg().arch {
          session::arch_x86. { shape_i32 }
          session::arch_x86_64. { shape_i64 }
          session::arch_arm. { shape_i32 }
// Unsigned counterpart of s_int: shape code for the target's native `uint`.
// (Closing braces of the alt/fn elided from this chunk.)
fn s_uint(tcx: ty_ctxt) -> u8 {
    ret alt tcx.sess.get_targ_cfg().arch {
          session::arch_x86. { shape_u32 }
          session::arch_x86_64. { shape_u64 }
          session::arch_arm. { shape_u32 }
// Shape code for the target's native `float`: f64 on every supported
// architecture. (Closing braces of the alt/fn elided from this chunk.)
fn s_float(tcx: ty_ctxt) -> u8 {
    ret alt tcx.sess.get_targ_cfg().arch {
          session::arch_x86. { shape_f64 }
          session::arch_x86_64. { shape_f64 }
          session::arch_arm. { shape_f64 }
// Shape code for a tag discriminant value. NOTE(review): the body is elided
// from this chunk — presumably it returns an int-sized code (cf. s_int);
// confirm against the full source.
fn s_variant_tag_t(tcx: ty_ctxt) -> u8 {
// Builds a fresh shape context for a crate: declares the (as yet bodiless)
// "shapes" struct type and its module-level global, and initializes the
// empty tag/resource bookkeeping. The struct body and global initializer are
// filled in later by gen_shape_tables.
// NOTE(review): the binding of `llshapetables` and the closure/arguments
// around str::as_buf/LLVMAddGlobal are elided from this chunk.
fn mk_ctxt(llmod: ModuleRef) -> ctxt {
    let llshapetablesty = trans_common::T_named_struct("shapes");
        str::as_buf("shapes",
                        lib::llvm::llvm::LLVMAddGlobal(llmod, llshapetablesty,
    ret {mutable next_tag_id: 0u16,
         tag_id_to_index: common::new_def_hash(),
         mutable tag_order: [],
         resources: interner::mk(hash_res_info, eq_res_info),
         llshapetablesty: llshapetablesty,
         llshapetables: llshapetables};
// Appends one byte to the shape stream `dest`: 1 for true, 0 for false.
fn add_bool(&dest: [u8], val: bool) { dest += [if val { 1u8 } else { 0u8 }]; }
// Appends `val` to the shape stream as two bytes, low byte first
// (little-endian). NOTE(review): this relies on `as` binding looser than
// `&`/`>>` in this dialect, i.e. the elements parse as (val & 0xffu16) as u8
// and (val >> 8u16) as u8 — under modern precedence this would not
// type-check; confirm against the compiler version in use.
// (Closing brace elided from this chunk.)
fn add_u16(&dest: [u8], val: u16) {
    dest += [val & 0xffu16 as u8, val >> 8u16 as u8];
// Appends a length-prefixed sub-stream: a u16 byte count followed by the
// bytes of `src`. NOTE(review): the line appending `src` itself and the
// closing brace are elided from this chunk.
fn add_substr(&dest: [u8], src: [u8]) {
    add_u16(dest, vec::len(src) as u16);
// Encodes the type `t` as a shape byte stream. Scalar types become a single
// opcode byte; compound types (boxes, vectors, records, tags, resources)
// emit an opcode followed by length-prefixed sub-shapes. Tags are registered
// in ccx.shape_cx on first sight so gen_tag_shapes can emit their tables.
// `ty_param_map` maps a type-parameter index to its slot in the enclosing
// item's parameter list; `is_obj_body` is threaded through to recursive
// calls (its use at the ty_param case is partly elided from this chunk).
// NOTE(review): many match-arm headers, accumulator initializers (`s`,
// `sub`), and closing braces are elided from this chunk; the surviving lines
// are annotated below as-is.
fn shape_of(ccx: @crate_ctxt, t: ty::t, ty_param_map: [uint],
            is_obj_body: bool) -> [u8] {
    alt ty::struct(ccx.tcx, t) {
      // One-byte scalar shapes.
      ty::ty_nil. | ty::ty_bool. | ty::ty_uint(ast::ty_u8.) |
      ty::ty_bot. { s += [shape_u8]; }
      ty::ty_int(ast::ty_i.) { s += [s_int(ccx.tcx)]; }
      ty::ty_float(ast::ty_f.) { s += [s_float(ccx.tcx)]; }
      // Pointer-sized values share the uint shape.
      ty::ty_uint(ast::ty_u.) | ty::ty_ptr(_) | ty::ty_type. |
      ty::ty_send_type. | ty::ty_native(_) { s += [s_uint(ccx.tcx)]; }
      ty::ty_int(ast::ty_i8.) { s += [shape_i8]; }
      ty::ty_uint(ast::ty_u16.) { s += [shape_u16]; }
      ty::ty_int(ast::ty_i16.) { s += [shape_i16]; }
      ty::ty_uint(ast::ty_u32.) { s += [shape_u32]; }
      ty::ty_int(ast::ty_i32.) | ty::ty_int(ast::ty_char.) {s += [shape_i32];}
      ty::ty_uint(ast::ty_u64.) { s += [shape_u64]; }
      ty::ty_int(ast::ty_i64.) { s += [shape_i64]; }
      ty::ty_float(ast::ty_f32.) { s += [shape_f32]; }
      ty::ty_float(ast::ty_f64.) { s += [shape_f64]; }
        // (Arm header elided.) A u8 element stream: POD flag plus the
        // sub-shape of the u8 unit type.
        add_bool(s, true); // type is POD
        let unit_ty = ty::mk_mach_uint(ccx.tcx, ast::ty_u8);
        add_substr(s, shape_of(ccx, unit_ty, ty_param_map, is_obj_body));
      ty::ty_tag(did, tps) {
        alt tag_kind(ccx, did) {
            // FIXME: For now we do this.
            s += [s_variant_tag_t(ccx.tcx)];
          tk_enum. { s += [s_variant_tag_t(ccx.tcx)]; }
            // (tk_complex arm header elided.) Assign the tag a table index
            // the first time it is seen and record it for gen_tag_shapes.
            alt ccx.shape_cx.tag_id_to_index.find(did) {
                id = ccx.shape_cx.next_tag_id;
                ccx.shape_cx.tag_id_to_index.insert(did, id);
                ccx.shape_cx.tag_order += [did];
                ccx.shape_cx.next_tag_id += 1u16;
              some(existing_id) { id = existing_id; }
            // Sub-stream: the tag's index, then each type argument's shape.
            add_u16(sub, id as u16);
            add_u16(sub, vec::len(tps) as u16);
            for tp: ty::t in tps {
                let subshape = shape_of(ccx, tp, ty_param_map, is_obj_body);
                add_u16(sub, vec::len(subshape) as u16);
        // (Box/vec/uniq arm headers elided; each embeds its content shape.)
        add_substr(s, shape_of(ccx, mt.ty, ty_param_map, is_obj_body));
        add_substr(s, shape_of(ccx, mt.ty, ty_param_map, is_obj_body));
        add_bool(s, ty::type_is_pod(ccx.tcx, mt.ty));
        add_substr(s, shape_of(ccx, mt.ty, ty_param_map, is_obj_body));
        // (Record arm header elided.) Concatenate each field's shape.
        for f: field in fields {
            sub += shape_of(ccx, f.mt.ty, ty_param_map, is_obj_body);
            // (Tuple-element loop header elided.)
            sub += shape_of(ccx, elt, ty_param_map, is_obj_body);
      ty::ty_native_fn(_, _) { s += [shape_u32]; }
      ty::ty_obj(_) { s += [shape_obj]; }
      ty::ty_res(did, raw_subt, tps) {
        // Substitute the resource's type args into its content type and
        // intern the instantiation for gen_resource_shapes.
        let subt = ty::substitute_type_params(ccx.tcx, tps, raw_subt);
        let ri = {did: did, t: subt};
        let id = interner::intern(ccx.shape_cx.resources, ri);
        add_u16(s, id as u16);
        add_u16(s, vec::len(tps) as u16);
        for tp: ty::t in tps {
            add_substr(s, shape_of(ccx, tp, ty_param_map, is_obj_body));
        add_substr(s, shape_of(ccx, subt, ty_param_map, is_obj_body));
        // (ty_var arm header elided.) Unresolved type variables must not
        // reach shape generation.
        fail "shape_of ty_var";
        // (ty_param arm partially elided; this branch looks like the
        // obj-body case, where parameter numbers are used directly.)
        // Just write in the parameter number.
        s += [shape_var, n as u8];
        // Find the type parameter in the parameter list.
        alt vec::position(n, ty_param_map) {
          some(i) { s += [shape_var, i as u8]; }
          none. { fail "ty param not found in ty_param_map"; }
      ty::ty_opaque_closure_ptr(_) {
        s += [shape_opaque_closure_ptr];
430 // FIXME: We might discover other variants as we traverse these. Handle this.
// Concatenates the shapes of a tag variant's argument types. Builds an
// identity ty_param_map [0, 1, ..., ty_param_count-1] so parameter indices
// pass through unchanged. NOTE(review): initializers for `i` and `s`, the
// `ret`, and closing braces are elided from this chunk.
fn shape_of_variant(ccx: @crate_ctxt, v: ty::variant_info,
                    ty_param_count: uint) -> [u8] {
    let ty_param_map = [];
    while i < ty_param_count { ty_param_map += [i]; i += 1u; }
    for t: ty::t in v.args { s += shape_of(ccx, t, ty_param_map, false); }
// Emits the tag shape table global: a header of per-tag offsets, per-tag
// info records (variant count, largest-variant table offset, static
// size/align or a zero placeholder, and per-variant shape offsets), the raw
// variant shape data, and the largest-variants tables.
// NOTE(review): the `data`/`offsets`/`header`/`info`/`lv_table` initializers,
// loop counters, the `dynamic` flag reset, the table concatenation before
// the final mk_global, and several braces are elided from this chunk.
fn gen_tag_shapes(ccx: @crate_ctxt) -> ValueRef {
    // Loop over all the tag variants and write their shapes into a data
    // buffer. As we do this, it's possible for us to discover new tags, so we
    // must do this first.
    while i < vec::len(ccx.shape_cx.tag_order) {
        let did = ccx.shape_cx.tag_order[i];
        let variants = ty::tag_variants(ccx.tcx, did);
        let item_tyt = ty::lookup_item_type(ccx.tcx, did);
        let ty_param_count = vec::len(*item_tyt.bounds);
        for v: ty::variant_info in *variants {
            // Remember where this variant's shape starts within `data`.
            offsets += [vec::len(data) as u16];
            let variant_shape = shape_of_variant(ccx, v, ty_param_count);
            add_substr(data, variant_shape);
    // Now calculate the sizes of the header space (which contains offsets to
    // info records for each tag) and the info space (which contains offsets
    // to each variant shape). As we do so, build up the header.
    let header_sz = 2u16 * ccx.shape_cx.next_tag_id;
    let data_sz = vec::len(data) as u16;
    for did_: ast::def_id in ccx.shape_cx.tag_order {
        let did = did_; // Satisfy alias checker.
        let variants = ty::tag_variants(ccx.tcx, did);
        add_u16(header, header_sz + info_sz);
        // Per-tag info record: variant count (2) + lv-table offset (2) +
        // size (2) + align (1) + 2 bytes per variant offset.
        info_sz += 2u16 * ((vec::len(*variants) as u16) + 2u16) + 3u16;
    // Construct the info tables, which contain offsets to the shape of each
    // variant. Also construct the largest-variant table for each tag, which
    // contains the variants that the size-of operation needs to look at.
    for did_: ast::def_id in ccx.shape_cx.tag_order {
        let did = did_; // Satisfy alias checker.
        let variants = ty::tag_variants(ccx.tcx, did);
        add_u16(info, vec::len(*variants) as u16);
        // Construct the largest-variants table.
                headers_elided: // NOTE(review): the add_u16 call wrapping the
                // next line's offset expression is elided from this chunk.
                header_sz + info_sz + data_sz + (vec::len(lv_table) as u16));
        let lv = largest_variants(ccx, did);
        add_u16(lv_table, vec::len(lv) as u16);
        for v: uint in lv { add_u16(lv_table, v as u16); }
        // Determine whether the tag has dynamic size.
        for variant: ty::variant_info in *variants {
            for typ: ty::t in variant.args {
                if ty::type_has_dynamic_size(ccx.tcx, typ) { dynamic = true; }
        // If we can, write in the static size and alignment of the tag.
        // Otherwise, write a placeholder.
            size_align = {size: 0u16, align: 0u8};
        } else { size_align = compute_static_tag_size(ccx, lv, did); }
        add_u16(info, size_align.size);
        info += [size_align.align];
        // Now write in the offset of each variant.
        for v: ty::variant_info in *variants {
            add_u16(info, header_sz + info_sz + offsets[i]);
    // Sanity checks: the sizes predicted above must match what was built.
    assert (i == vec::len(offsets));
    assert (header_sz == vec::len(header) as u16);
    assert (info_sz == vec::len(info) as u16);
    assert (data_sz == vec::len(data) as u16);
    ret mk_global(ccx, "tag_shapes", C_bytes(header), true);
// Emits the resource shape table: a constant struct of destructor function
// pointers, one per interned res_info, indexed by the ids handed out in
// shape_of's ty_res case. NOTE(review): the `dtors` initializer, the loop
// over `i` up to `len`, and closing braces are elided from this chunk.
fn gen_resource_shapes(ccx: @crate_ctxt) -> ValueRef {
    let len = interner::len(ccx.shape_cx.resources);
        let ri = interner::get(ccx.shape_cx.resources, i);
        dtors += [trans_common::get_res_dtor(ccx, dummy_sp(), ri.did, ri.t)];
    ret mk_global(ccx, "resource_shapes", C_struct(dtors), true);
// Entry point called at the end of translation: generates both shape tables,
// fills in the body of the "shapes" struct type declared in mk_ctxt, and
// initializes the module-level global with a constant, internal-linkage
// struct of the two tables. NOTE(review): the `let lltables =` binding for
// the C_named_struct result and the closing brace are elided from this chunk.
fn gen_shape_tables(ccx: @crate_ctxt) {
    let lltagstable = gen_tag_shapes(ccx);
    let llresourcestable = gen_resource_shapes(ccx);
    trans_common::set_struct_body(ccx.shape_cx.llshapetablesty,
                                  [val_ty(lltagstable),
                                   val_ty(llresourcestable)]);
        C_named_struct(ccx.shape_cx.llshapetablesty,
                       [lltagstable, llresourcestable]);
    lib::llvm::llvm::LLVMSetInitializer(ccx.shape_cx.llshapetables, lltables);
    lib::llvm::llvm::LLVMSetGlobalConstant(ccx.shape_cx.llshapetables, True);
    lib::llvm::llvm::LLVMSetLinkage(ccx.shape_cx.llshapetables,
                                    lib::llvm::LLVMInternalLinkage as
                                        lib::llvm::llvm::Linkage);