5 import std._str.rustrt.sbuf;
6 import std._vec.rustrt.vbuf;
8 import std.map.hashmap;
10 import std.option.some;
11 import std.option.none;
14 import driver.session;
19 import middle.ty.pat_ty;
20 import middle.ty.plain_ty;
23 import util.common.append;
24 import util.common.istr;
25 import util.common.new_def_hash;
26 import util.common.new_str_hash;
29 import lib.llvm.builder;
30 import lib.llvm.target_data;
31 import lib.llvm.type_handle;
32 import lib.llvm.type_names;
33 import lib.llvm.mk_pass_manager;
34 import lib.llvm.mk_target_data;
35 import lib.llvm.mk_type_handle;
36 import lib.llvm.mk_type_names;
37 import lib.llvm.llvm.ModuleRef;
38 import lib.llvm.llvm.ValueRef;
39 import lib.llvm.llvm.TypeRef;
40 import lib.llvm.llvm.TypeHandleRef;
41 import lib.llvm.llvm.BuilderRef;
42 import lib.llvm.llvm.BasicBlockRef;
44 import lib.llvm.False;
47 state obj namegen(mutable int i) {
48 fn next(str prefix) -> str {
54 type glue_fns = rec(ValueRef activate_glue,
56 ValueRef exit_task_glue,
57 vec[ValueRef] upcall_glues,
58 ValueRef no_op_type_glue,
61 ValueRef vec_append_glue);
63 type tydesc_info = rec(ValueRef tydesc,
67 state type crate_ctxt = rec(session.session sess,
72 hashmap[str, ValueRef] upcalls,
73 hashmap[str, ValueRef] intrinsics,
74 hashmap[str, ValueRef] item_names,
75 hashmap[ast.def_id, ValueRef] item_ids,
76 hashmap[ast.def_id, @ast.item] items,
78 @ast.native_item] native_items,
79 // TODO: hashmap[tup(tag_id,subtys), @tag_info]
80 hashmap[@ty.t, uint] tag_sizes,
81 hashmap[ast.def_id, ValueRef] discrims,
82 hashmap[ast.def_id, ValueRef] fn_pairs,
83 hashmap[ast.def_id, ValueRef] consts,
84 hashmap[ast.def_id,()] obj_methods,
85 hashmap[@ty.t, @tydesc_info] tydescs,
86 vec[ast.ty_param] obj_typarams,
87 vec[ast.obj_field] obj_fields,
92 state type fn_ctxt = rec(ValueRef llfn,
96 mutable option.t[ValueRef] llself,
97 mutable option.t[ValueRef] lliterbody,
98 hashmap[ast.def_id, ValueRef] llargs,
99 hashmap[ast.def_id, ValueRef] llobjfields,
100 hashmap[ast.def_id, ValueRef] lllocals,
101 hashmap[ast.def_id, ValueRef] lltydescs,
105 clean(fn(@block_ctxt cx) -> result);
114 state type block_ctxt = rec(BasicBlockRef llbb,
118 mutable vec[cleanup] cleanups,
121 // FIXME: we should be able to use option.t[@block_parent] here but
122 // the infinite-tag check in rustboot gets upset.
126 parent_some(@block_ctxt);
130 state type result = rec(mutable @block_ctxt bcx,
131 mutable ValueRef val);
137 fn res(@block_ctxt bcx, ValueRef val) -> result {
138 ret rec(mutable bcx = bcx,
142 fn ty_str(type_names tn, TypeRef t) -> str {
143 ret lib.llvm.type_to_str(tn, t);
146 fn val_ty(ValueRef v) -> TypeRef {
147 ret llvm.LLVMTypeOf(v);
150 fn val_str(type_names tn, ValueRef v) -> str {
151 ret ty_str(tn, val_ty(v));
155 // LLVM type constructors.
157 fn T_void() -> TypeRef {
158 // Note: For the time being llvm is kinda busted here, it has the notion
159 // of a 'void' type that can only occur as part of the signature of a
160 // function, but no general unit type of 0-sized value. This is, afaict,
161 // vestigial from its C heritage, and we'll be attempting to submit a
162 // patch upstream to fix it. In the mean time we only model function
163 // outputs (Rust functions and C functions) using T_void, and model the
164 // Rust general purpose nil type you can construct as 1-bit (always
165 // zero). This makes the result incorrect for now -- things like a tuple
166 // of 10 nil values will have 10-bit size -- but it doesn't seem like we
167 // have any other options until it's fixed upstream.
168 ret llvm.LLVMVoidType();
171 fn T_nil() -> TypeRef {
172 // NB: See above in T_void().
173 ret llvm.LLVMInt1Type();
176 fn T_i1() -> TypeRef {
177 ret llvm.LLVMInt1Type();
180 fn T_i8() -> TypeRef {
181 ret llvm.LLVMInt8Type();
184 fn T_i16() -> TypeRef {
185 ret llvm.LLVMInt16Type();
188 fn T_i32() -> TypeRef {
189 ret llvm.LLVMInt32Type();
192 fn T_i64() -> TypeRef {
193 ret llvm.LLVMInt64Type();
196 fn T_f32() -> TypeRef {
197 ret llvm.LLVMFloatType();
200 fn T_f64() -> TypeRef {
201 ret llvm.LLVMDoubleType();
204 fn T_bool() -> TypeRef {
208 fn T_int() -> TypeRef {
209 // FIXME: switch on target type.
213 fn T_char() -> TypeRef {
// Builds an LLVM function type from a vector of input types and an output
// type, handing LLVM a raw (buf, len) view of the inputs vector.
// NOTE(review): the call's trailing argument(s) (e.g. the vararg flag of
// LLVMFunctionType) appear elided in this excerpt — confirm against the
// full source.
217 fn T_fn(vec[TypeRef] inputs, TypeRef output) -> TypeRef {
218 ret llvm.LLVMFunctionType(output,
219 _vec.buf[TypeRef](inputs),
220 _vec.len[TypeRef](inputs),
224 fn T_fn_pair(type_names tn, TypeRef tfn) -> TypeRef {
225 ret T_struct(vec(T_ptr(tfn),
226 T_opaque_closure_ptr(tn)));
229 fn T_ptr(TypeRef t) -> TypeRef {
230 ret llvm.LLVMPointerType(t, 0u);
233 fn T_struct(vec[TypeRef] elts) -> TypeRef {
234 ret llvm.LLVMStructType(_vec.buf[TypeRef](elts),
235 _vec.len[TypeRef](elts),
239 fn T_opaque() -> TypeRef {
240 ret llvm.LLVMOpaqueType();
243 fn T_task(type_names tn) -> TypeRef {
245 if (tn.name_has_type(s)) {
249 auto t = T_struct(vec(T_int(), // Refcount
250 T_int(), // Delegate pointer
251 T_int(), // Stack segment pointer
252 T_int(), // Runtime SP
255 T_int(), // Domain pointer
256 T_int() // Crate cache pointer
262 fn T_glue_fn(type_names tn) -> TypeRef {
264 if (tn.name_has_type(s)) {
268 // Bit of a kludge: pick the fn typeref out of the tydesc..
269 let vec[TypeRef] tydesc_elts = _vec.init_elt[TypeRef](T_nil(), 10u);
270 llvm.LLVMGetStructElementTypes(T_tydesc(tn),
271 _vec.buf[TypeRef](tydesc_elts));
273 llvm.LLVMGetElementType
274 (tydesc_elts.(abi.tydesc_field_drop_glue_off));
// Constructs the LLVM struct type for a tydesc (runtime type descriptor)
// record. Uses an LLVM type handle so the type can refer to itself: the
// glue-function fields point at functions whose signature mentions the
// (abstract) tydesc being defined, and LLVMRefineType ties the recursive
// knot at the end. The result is cached by name via `tn`
// (name_has_type check at the top).
// NOTE(review): several lines are elided in this excerpt — the cached-name
// early return, the tail of the glue_fn_ty signature, the size/align
// fields of the struct, and the final name-registration/return — so the
// field list shown here is partial.
279 fn T_tydesc(type_names tn) -> TypeRef {
282 if (tn.name_has_type(s)) {
286 auto th = mk_type_handle();
287 auto abs_tydesc = llvm.LLVMResolveTypeHandle(th.llth);
288 auto tydescpp = T_ptr(T_ptr(abs_tydesc));
289 auto pvoid = T_ptr(T_i8());
290 auto glue_fn_ty = T_ptr(T_fn(vec(T_ptr(T_nil()),
295 auto tydesc = T_struct(vec(tydescpp, // first_param
298 glue_fn_ty, // take_glue_off
299 glue_fn_ty, // drop_glue_off
300 glue_fn_ty, // free_glue_off
301 glue_fn_ty, // sever_glue_off
302 glue_fn_ty, // mark_glue_off
303 glue_fn_ty, // obj_drop_glue_off
304 glue_fn_ty)); // is_stateful
306 llvm.LLVMRefineType(abs_tydesc, tydesc);
307 auto t = llvm.LLVMResolveTypeHandle(th.llth);
312 fn T_array(TypeRef t, uint n) -> TypeRef {
313 ret llvm.LLVMArrayType(t, n);
316 fn T_vec(TypeRef t) -> TypeRef {
317 ret T_struct(vec(T_int(), // Refcount
320 T_array(t, 0u) // Body elements
324 fn T_opaque_vec_ptr() -> TypeRef {
325 ret T_ptr(T_vec(T_int()));
328 fn T_str() -> TypeRef {
332 fn T_box(TypeRef t) -> TypeRef {
333 ret T_struct(vec(T_int(), t));
336 fn T_crate(type_names tn) -> TypeRef {
338 if (tn.name_has_type(s)) {
342 auto t = T_struct(vec(T_int(), // ptrdiff_t image_base_off
343 T_int(), // uintptr_t self_addr
344 T_int(), // ptrdiff_t debug_abbrev_off
345 T_int(), // size_t debug_abbrev_sz
346 T_int(), // ptrdiff_t debug_info_off
347 T_int(), // size_t debug_info_sz
348 T_int(), // size_t activate_glue_off
349 T_int(), // size_t yield_glue_off
350 T_int(), // size_t unwind_glue_off
351 T_int(), // size_t gc_glue_off
352 T_int(), // size_t main_exit_task_glue_off
353 T_int(), // int n_rust_syms
354 T_int(), // int n_c_syms
355 T_int(), // int n_libs
356 T_int() // uintptr_t abi_tag
362 fn T_double() -> TypeRef {
363 ret llvm.LLVMDoubleType();
366 fn T_taskptr(type_names tn) -> TypeRef {
367 ret T_ptr(T_task(tn));
370 // This type must never be used directly; it must always be cast away.
371 fn T_typaram(type_names tn) -> TypeRef {
373 if (tn.name_has_type(s)) {
382 fn T_typaram_ptr(type_names tn) -> TypeRef {
383 ret T_ptr(T_typaram(tn));
386 fn T_closure_ptr(type_names tn,
388 TypeRef llbindings_ty,
389 uint n_ty_params) -> TypeRef {
390 ret T_ptr(T_box(T_struct(vec(T_ptr(T_tydesc(tn)),
393 T_captured_tydescs(tn, n_ty_params))
397 fn T_opaque_closure_ptr(type_names tn) -> TypeRef {
399 if (tn.name_has_type(s)) {
402 auto t = T_closure_ptr(tn, T_struct(vec(T_ptr(T_nil()),
410 fn T_tag(type_names tn, uint size) -> TypeRef {
411 auto s = "tag_" + _uint.to_str(size, 10u);
412 if (tn.name_has_type(s)) {
415 auto t = T_struct(vec(T_int(), T_array(T_i8(), size)));
420 fn T_opaque_tag(type_names tn) -> TypeRef {
422 if (tn.name_has_type(s)) {
425 auto t = T_struct(vec(T_int(), T_i8()));
430 fn T_opaque_tag_ptr(type_names tn) -> TypeRef {
431 ret T_ptr(T_opaque_tag(tn));
434 fn T_captured_tydescs(type_names tn, uint n) -> TypeRef {
435 ret T_struct(_vec.init_elt[TypeRef](T_ptr(T_tydesc(tn)), n));
438 fn T_obj_ptr(type_names tn, uint n_captured_tydescs) -> TypeRef {
439 // This function is not publicly exposed because it returns an incomplete
440 // type. The dynamically-sized fields follow the captured tydescs.
441 fn T_obj(type_names tn, uint n_captured_tydescs) -> TypeRef {
442 ret T_struct(vec(T_ptr(T_tydesc(tn)),
443 T_captured_tydescs(tn, n_captured_tydescs)));
446 ret T_ptr(T_box(T_obj(tn, n_captured_tydescs)));
449 fn T_opaque_obj_ptr(type_names tn) -> TypeRef {
450 ret T_obj_ptr(tn, 0u);
454 // This function now fails if called on a type with dynamic size (as its
455 // return value was always meaningless in that case anyhow). Beware!
457 // TODO: Enforce via a predicate.
// Maps a Rust type to its LLVM TypeRef. Logs an error when called on a
// type with dynamic size (its LLVM translation would be meaningless — see
// the comment block preceding this fn in the source); callers must check
// ty.type_has_dynamic_size first. Delegates to type_of_inner with
// boxed=false.
458 fn type_of(@crate_ctxt cx, @ty.t t) -> TypeRef {
459 if (ty.type_has_dynamic_size(t)) {
460 log "type_of() called on a type with dynamic size: " +
465 ret type_of_inner(cx, t, false);
468 fn type_of_explicit_args(@crate_ctxt cx,
469 vec[ty.arg] inputs) -> vec[TypeRef] {
470 let vec[TypeRef] atys = vec();
471 for (ty.arg arg in inputs) {
472 if (ty.type_has_dynamic_size(arg.ty)) {
473 check (arg.mode == ast.alias);
474 atys += T_typaram_ptr(cx.tn);
479 t = T_ptr(type_of_inner(cx, arg.ty, true));
482 t = type_of_inner(cx, arg.ty, false);
491 // NB: must keep 4 fns in sync:
494 // - create_llargs_for_fn_args.
// Computes the full LLVM function type for a Rust fn/iter, laying out the
// implicit calling convention visible here:
//   arg 0 — out-pointer for the return value (a typaram pointer when the
//           output has dynamic size);
//   arg 1 — task pointer;
//   arg 2 — environment (closure bindings, or the self-obj when
//           obj_self is some);
//   args >3 — one tydesc pointer per type parameter, only when not
//           acquired via obj-self capture;
//   then, for iters, the fn-pair of the iter body block (whose *input*
//   is this iter's output type);
//   then the explicit args.
// All results flow through the out-pointer, so the LLVM return type is
// void.
// NOTE(review): the `proto` and `inputs` parameter declaration lines, the
// else-branches, and the loop-counter updates are elided in this excerpt.
498 fn type_of_fn_full(@crate_ctxt cx,
500 option.t[TypeRef] obj_self,
502 @ty.t output) -> TypeRef {
503 let vec[TypeRef] atys = vec();
505 // Arg 0: Output pointer.
506 if (ty.type_has_dynamic_size(output)) {
507 atys += T_typaram_ptr(cx.tn);
509 atys += T_ptr(type_of_inner(cx, output, false));
512 // Arg 1: Task pointer.
513 atys += T_taskptr(cx.tn);
515 // Arg 2: Env (closure-bindings / self-obj)
517 case (some[TypeRef](?t)) {
518 check (t as int != 0);
522 atys += T_opaque_closure_ptr(cx.tn);
526 // Args >3: ty params, if not acquired via capture...
527 if (obj_self == none[TypeRef]) {
528 auto ty_param_count =
529 ty.count_ty_params(plain_ty(ty.ty_fn(proto,
533 while (i < ty_param_count) {
534 atys += T_ptr(T_tydesc(cx.tn));
539 if (proto == ast.proto_iter) {
540 // If it's an iter, the 'output' type of the iter is actually the
541 // *input* type of the function we're given as our iter-block
543 atys += T_fn_pair(cx.tn,
544 type_of_fn_full(cx, ast.proto_fn, none[TypeRef],
545 vec(rec(mode=ast.val, ty=output)),
546 plain_ty(ty.ty_nil)));
549 // ... then explicit args.
550 atys += type_of_explicit_args(cx, inputs);
552 ret T_fn(atys, llvm.LLVMVoidType());
555 fn type_of_fn(@crate_ctxt cx,
557 vec[ty.arg] inputs, @ty.t output) -> TypeRef {
558 ret type_of_fn_full(cx, proto, none[TypeRef], inputs, output);
561 fn type_of_native_fn(@crate_ctxt cx, ast.native_abi abi,
563 @ty.t output) -> TypeRef {
564 let vec[TypeRef] atys = vec();
565 if (abi == ast.native_abi_rust) {
566 atys += T_taskptr(cx.tn);
567 auto t = ty.ty_native_fn(abi, inputs, output);
568 auto ty_param_count = ty.count_ty_params(plain_ty(t));
570 while (i < ty_param_count) {
571 atys += T_ptr(T_tydesc(cx.tn));
575 atys += type_of_explicit_args(cx, inputs);
576 ret T_fn(atys, type_of_inner(cx, output, false));
// Core type-lowering switch: maps each ty.t variant to its LLVM type.
// The `boxed` flag is threaded through aggregate members (tup/rec);
// box and vec interiors are lowered with boxed=true. Tags lower either
// to the opaque tag struct or, when statically sized, to a sized
// T_tag; fns and native fns lower to fn-pair structs; objs use an LLVM
// type handle to build the recursive (vtable-ptr, body-ptr) pair,
// refined at the end. ty_var reaching trans is logged as an error.
// The final check guards against falling through the alt without
// assigning llty, and the resulting type's name is registered on the
// module for readable IR dumps.
// NOTE(review): inner `alt` headers, else-arms, and the ty_param /
// return tails are elided in this excerpt.
579 fn type_of_inner(@crate_ctxt cx, @ty.t t, bool boxed) -> TypeRef {
580 let TypeRef llty = 0 as TypeRef;
583 case (ty.ty_native) { llty = T_ptr(T_i8()); }
584 case (ty.ty_nil) { llty = T_nil(); }
585 case (ty.ty_bool) { llty = T_bool(); }
586 case (ty.ty_int) { llty = T_int(); }
587 case (ty.ty_uint) { llty = T_int(); }
588 case (ty.ty_machine(?tm)) {
590 case (common.ty_i8) { llty = T_i8(); }
591 case (common.ty_u8) { llty = T_i8(); }
592 case (common.ty_i16) { llty = T_i16(); }
593 case (common.ty_u16) { llty = T_i16(); }
594 case (common.ty_i32) { llty = T_i32(); }
595 case (common.ty_u32) { llty = T_i32(); }
596 case (common.ty_i64) { llty = T_i64(); }
597 case (common.ty_u64) { llty = T_i64(); }
598 case (common.ty_f32) { llty = T_f32(); }
599 case (common.ty_f64) { llty = T_f64(); }
602 case (ty.ty_char) { llty = T_char(); }
603 case (ty.ty_str) { llty = T_ptr(T_str()); }
604 case (ty.ty_tag(_, _)) {
606 llty = T_opaque_tag(cx.tn);
608 auto size = static_size_of_tag(cx, t);
609 llty = T_tag(cx.tn, size);
612 case (ty.ty_box(?t)) {
613 llty = T_ptr(T_box(type_of_inner(cx, t, true)));
615 case (ty.ty_vec(?t)) {
616 llty = T_ptr(T_vec(type_of_inner(cx, t, true)));
618 case (ty.ty_tup(?elts)) {
619 let vec[TypeRef] tys = vec();
620 for (@ty.t elt in elts) {
621 tys += type_of_inner(cx, elt, boxed);
623 llty = T_struct(tys);
625 case (ty.ty_rec(?fields)) {
626 let vec[TypeRef] tys = vec();
627 for (ty.field f in fields) {
628 tys += type_of_inner(cx, f.ty, boxed);
630 llty = T_struct(tys);
632 case (ty.ty_fn(?proto, ?args, ?out)) {
633 llty = T_fn_pair(cx.tn, type_of_fn(cx, proto, args, out));
635 case (ty.ty_native_fn(?abi, ?args, ?out)) {
636 llty = T_fn_pair(cx.tn, type_of_native_fn(cx, abi, args, out));
638 case (ty.ty_obj(?meths)) {
639 auto th = mk_type_handle();
640 auto self_ty = llvm.LLVMResolveTypeHandle(th.llth);
642 let vec[TypeRef] mtys = vec();
643 for (ty.method m in meths) {
645 type_of_fn_full(cx, m.proto,
646 some[TypeRef](self_ty),
650 let TypeRef vtbl = T_struct(mtys);
651 let TypeRef pair = T_struct(vec(T_ptr(vtbl),
652 T_opaque_obj_ptr(cx.tn)));
654 auto abs_pair = llvm.LLVMResolveTypeHandle(th.llth);
655 llvm.LLVMRefineType(abs_pair, pair);
656 abs_pair = llvm.LLVMResolveTypeHandle(th.llth);
659 case (ty.ty_var(_)) {
660 log "ty_var in trans.type_of";
663 case (ty.ty_param(_)) {
666 case (ty.ty_type) { llty = T_ptr(T_tydesc(cx.tn)); }
669 check (llty as int != 0);
670 llvm.LLVMAddTypeName(cx.llmod, _str.buf(ty.ty_to_str(t)), llty);
674 fn type_of_arg(@crate_ctxt cx, &ty.arg arg) -> TypeRef {
675 alt (arg.ty.struct) {
676 case (ty.ty_param(_)) {
677 if (arg.mode == ast.alias) {
678 ret T_typaram_ptr(cx.tn);
687 if (arg.mode == ast.alias) {
688 typ = T_ptr(type_of_inner(cx, arg.ty, true));
690 typ = type_of_inner(cx, arg.ty, false);
695 // Name sanitation. LLVM will happily accept identifiers with weird names, but
698 fn sanitize(str s) -> str {
701 if (c == ('@' as u8)) {
704 if (c == (',' as u8)) {
707 if (c == ('{' as u8) || c == ('(' as u8)) {
710 if (c != 10u8 && c != ('}' as u8) && c != (')' as u8) &&
711 c != (' ' as u8) && c != ('\t' as u8) &&
714 result += _str.from_bytes(v);
723 // LLVM constant constructors.
725 fn C_null(TypeRef t) -> ValueRef {
726 ret llvm.LLVMConstNull(t);
729 fn C_integral(int i, TypeRef t) -> ValueRef {
730 // FIXME. We can't use LLVM.ULongLong with our existing minimal native
731 // API, which only knows word-sized args. Lucky for us LLVM has a "take a
732 // string encoding" version. Hilarious. Please fix to handle:
734 // ret llvm.LLVMConstInt(T_int(), t as LLVM.ULongLong, False);
736 ret llvm.LLVMConstIntOfString(t, _str.buf(istr(i)), 10);
739 fn C_nil() -> ValueRef {
740 // NB: See comment above in T_void().
741 ret C_integral(0, T_i1());
744 fn C_bool(bool b) -> ValueRef {
746 ret C_integral(1, T_bool());
748 ret C_integral(0, T_bool());
752 fn C_int(int i) -> ValueRef {
753 ret C_integral(i, T_int());
756 // This is a 'c-like' raw string, which differs from
757 // our boxed-and-length-annotated strings.
// Emits a C-style raw string constant (no box header, no length field —
// see the comment preceding this fn): a private, constant global holding
// the bytes of `s`, named via the crate's name generator.
// NOTE(review): the tail of the LLVMSetLinkage call and the return are
// elided in this excerpt.
758 fn C_cstr(@crate_ctxt cx, str s) -> ValueRef {
759 auto sc = llvm.LLVMConstString(_str.buf(s), _str.byte_len(s), False);
760 auto g = llvm.LLVMAddGlobal(cx.llmod, val_ty(sc),
761 _str.buf(cx.names.next("str")));
762 llvm.LLVMSetInitializer(g, sc);
763 llvm.LLVMSetGlobalConstant(g, True);
764 llvm.LLVMSetLinkage(g, lib.llvm.LLVMPrivateLinkage
769 // A rust boxed-and-length-annotated string.
// Emits a Rust boxed-and-length-annotated string constant: refcount,
// 'alloc' and 'fill' fields (both byte_len + 1, accounting for a
// terminating byte), then the character data. Stored as a private
// constant global and pointer-cast to the generic rust-str pointer type
// T_ptr(T_str()).
// NOTE(review): the tail of the inner LLVMConstString call and of the
// LLVMSetLinkage call are elided in this excerpt.
770 fn C_str(@crate_ctxt cx, str s) -> ValueRef {
771 auto len = _str.byte_len(s);
772 auto box = C_struct(vec(C_int(abi.const_refcount as int),
773 C_int(len + 1u as int), // 'alloc'
774 C_int(len + 1u as int), // 'fill'
775 llvm.LLVMConstString(_str.buf(s),
777 auto g = llvm.LLVMAddGlobal(cx.llmod, val_ty(box),
778 _str.buf(cx.names.next("str")));
779 llvm.LLVMSetInitializer(g, box);
780 llvm.LLVMSetGlobalConstant(g, True);
781 llvm.LLVMSetLinkage(g, lib.llvm.LLVMPrivateLinkage
783 ret llvm.LLVMConstPointerCast(g, T_ptr(T_str()));
786 fn C_zero_byte_arr(uint size) -> ValueRef {
788 let vec[ValueRef] elts = vec();
790 elts += vec(C_integral(0, T_i8()));
793 ret llvm.LLVMConstArray(T_i8(), _vec.buf[ValueRef](elts),
794 _vec.len[ValueRef](elts));
797 fn C_struct(vec[ValueRef] elts) -> ValueRef {
798 ret llvm.LLVMConstStruct(_vec.buf[ValueRef](elts),
799 _vec.len[ValueRef](elts),
803 fn decl_fn(ModuleRef llmod, str name, uint cc, TypeRef llty) -> ValueRef {
805 llvm.LLVMAddFunction(llmod, _str.buf(name), llty);
806 llvm.LLVMSetFunctionCallConv(llfn, cc);
810 fn decl_cdecl_fn(ModuleRef llmod, str name, TypeRef llty) -> ValueRef {
811 ret decl_fn(llmod, name, lib.llvm.LLVMCCallConv, llty);
814 fn decl_fastcall_fn(ModuleRef llmod, str name, TypeRef llty) -> ValueRef {
815 ret decl_fn(llmod, name, lib.llvm.LLVMFastCallConv, llty);
818 fn decl_glue(ModuleRef llmod, type_names tn, str s) -> ValueRef {
819 ret decl_cdecl_fn(llmod, s, T_fn(vec(T_taskptr(tn)), T_void()));
// Declares the n-ary upcall-invocation glue function (fastcall): takes
// the task pointer plus — per the vec below — `n` word-sized arguments,
// and returns a word. `_n` is the arity of the upcall being wrapped;
// the glue's name comes from abi.upcall_glue_name(n).
// NOTE(review): one element of the `args` vec (between the taskptr and
// the n words) is elided in this excerpt — presumably the upcall address
// as an int, matching the cast in trans_upcall; confirm.
822 fn decl_upcall_glue(ModuleRef llmod, type_names tn, uint _n) -> ValueRef {
823 // It doesn't actually matter what type we come up with here, at the
824 // moment, as we cast the upcall function pointers to int before passing
825 // them to the indirect upcall-invocation glue. But eventually we'd like
826 // to call them directly, once we have a calling convention worked out.
827 let int n = _n as int;
828 let str s = abi.upcall_glue_name(n);
829 let vec[TypeRef] args =
830 vec(T_taskptr(tn), // taskptr
832 + _vec.init_elt[TypeRef](T_int(), n as uint);
834 ret decl_fastcall_fn(llmod, s, T_fn(args, T_int()));
// Returns (declaring and caching on first use) the C-decl runtime upcall
// `name`, typed as taking the task pointer plus n_args word-sized
// arguments and returning a word. Memoized in cx.upcalls by name so each
// upcall is declared on the module only once.
837 fn get_upcall(@crate_ctxt cx, str name, int n_args) -> ValueRef {
838 if (cx.upcalls.contains_key(name)) {
839 ret cx.upcalls.get(name);
841 auto inputs = vec(T_taskptr(cx.tn));
842 inputs += _vec.init_elt[TypeRef](T_int(), n_args as uint);
843 auto output = T_int();
844 auto f = decl_cdecl_fn(cx.llmod, name, T_fn(inputs, output));
845 cx.upcalls.insert(name, f);
// Emits a call into the runtime via the arity-matched upcall glue:
// looks up (declaring if needed) the named upcall, casts its address to
// a word, then fastcalls glues.upcall_glues.(n) with
// (taskptr, upcall-addr, args...), zero-extending / bitcasting every
// argument to word size to match the glue's all-words signature.
849 fn trans_upcall(@block_ctxt cx, str name, vec[ValueRef] args) -> result {
850 let int n = _vec.len[ValueRef](args) as int;
851 let ValueRef llupcall = get_upcall(cx.fcx.ccx, name, n);
852 llupcall = llvm.LLVMConstPointerCast(llupcall, T_int());
854 let ValueRef llglue = cx.fcx.ccx.glues.upcall_glues.(n);
855 let vec[ValueRef] call_args = vec(cx.fcx.lltaskptr, llupcall);
857 for (ValueRef a in args) {
858 call_args += cx.build.ZExtOrBitCast(a, T_int());
861 ret res(cx, cx.build.FastCall(llglue, call_args));
864 fn trans_non_gc_free(@block_ctxt cx, ValueRef v) -> result {
865 ret trans_upcall(cx, "upcall_free", vec(vp2i(cx, v),
869 fn find_scope_cx(@block_ctxt cx) -> @block_ctxt {
870 if (cx.kind == SCOPE_BLOCK) {
874 case (parent_some(?b)) {
883 fn umax(@block_ctxt cx, ValueRef a, ValueRef b) -> ValueRef {
884 auto cond = cx.build.ICmp(lib.llvm.LLVMIntULT, a, b);
885 ret cx.build.Select(cond, b, a);
888 fn align_to(@block_ctxt cx, ValueRef off, ValueRef align) -> ValueRef {
889 auto mask = cx.build.Sub(align, C_int(1));
890 auto bumped = cx.build.Add(off, mask);
891 ret cx.build.And(bumped, cx.build.Not(mask));
894 // Returns the real size of the given type for the current target.
895 fn llsize_of_real(@crate_ctxt cx, TypeRef t) -> uint {
896 ret llvm.LLVMStoreSizeOfType(cx.td.lltd, t);
899 fn llsize_of(TypeRef t) -> ValueRef {
900 ret llvm.LLVMConstIntCast(lib.llvm.llvm.LLVMSizeOf(t), T_int(), False);
903 fn llalign_of(TypeRef t) -> ValueRef {
904 ret llvm.LLVMConstIntCast(lib.llvm.llvm.LLVMAlignOf(t), T_int(), False);
907 fn size_of(@block_ctxt cx, @ty.t t) -> result {
908 if (!ty.type_has_dynamic_size(t)) {
909 ret res(cx, llsize_of(type_of(cx.fcx.ccx, t)));
911 ret dynamic_size_of(cx, t);
914 fn align_of(@block_ctxt cx, @ty.t t) -> result {
915 if (!ty.type_has_dynamic_size(t)) {
916 ret res(cx, llalign_of(type_of(cx.fcx.ccx, t)));
918 ret dynamic_align_of(cx, t);
921 // Computes the size of the data part of a non-dynamically-sized tag.
// Computes — and memoizes in cx.tag_sizes — the byte size of the data
// part of a statically-sized tag: the max, over all variants, of the
// store-size of the tuple of that variant's argument types (measured via
// llsize_of_real on the lowered type; note the possibly-recursive
// type_of call). Logs an error when handed a dynamically-sized type or a
// non-tag.
// NOTE(review): the binding of `tid` (presumably from ?tid_ in the alt
// arm) and the initialization of `max_size` are elided in this excerpt.
922 fn static_size_of_tag(@crate_ctxt cx, @ty.t t) -> uint {
923 if (ty.type_has_dynamic_size(t)) {
924 log "dynamically sized type passed to static_size_of_tag()";
928 if (cx.tag_sizes.contains_key(t)) {
929 ret cx.tag_sizes.get(t);
933 let vec[@ty.t] subtys;
935 case (ty.ty_tag(?tid_, ?subtys_)) {
940 log "non-tag passed to static_size_of_tag()";
945 // Compute max(variant sizes).
947 auto variants = tag_variants(cx, tid);
948 for (ast.variant variant in variants) {
949 let vec[@ty.t] tys = variant_types(cx, variant);
950 auto tup_ty = ty.plain_ty(ty.ty_tup(tys));
952 // Here we possibly do a recursive call.
953 auto this_size = llsize_of_real(cx, type_of(cx, tup_ty));
955 if (max_size < this_size) {
956 max_size = this_size;
960 cx.tag_sizes.insert(t, max_size);
// Emits code computing, at runtime, the byte size of a type whose size
// is not statically known: ty_param loads the size field of its tydesc;
// tup/rec sum their elements with alignment padding (align_elements);
// tag takes the max over variant payload sizes and adds one word
// (llsize_of(T_int()), the discriminant) to the total.
964 fn dynamic_size_of(@block_ctxt cx, @ty.t t) -> result {
// Sums element sizes with padding so each element starts aligned, and
// pads the final offset up to the max interior alignment (per the rules
// spelled out in the comment lines below).
965 fn align_elements(@block_ctxt cx, vec[@ty.t] elts) -> result {
970 // - Pad after each element so that next element is aligned.
971 // - Pad after final structure member so that whole structure
972 // is aligned to max alignment of interior.
975 auto max_align = C_int(1);
977 for (@ty.t e in elts) {
978 auto elt_align = align_of(bcx, e);
980 auto elt_size = size_of(bcx, e);
982 auto aligned_off = align_to(bcx, off, elt_align.val);
983 off = cx.build.Add(aligned_off, elt_size.val);
984 max_align = umax(bcx, max_align, elt_align.val);
986 off = align_to(bcx, off, max_align);
991 case (ty.ty_param(?p)) {
992 auto szptr = field_of_tydesc(cx, t, abi.tydesc_field_size);
993 ret res(szptr.bcx, szptr.bcx.build.Load(szptr.val));
995 case (ty.ty_tup(?elts)) {
996 ret align_elements(cx, elts);
998 case (ty.ty_rec(?flds)) {
999 let vec[@ty.t] tys = vec();
1000 for (ty.field f in flds) {
1003 ret align_elements(cx, tys);
1005 case (ty.ty_tag(?tid, ?tps)) {
1008 // Compute max(variant sizes).
1009 let ValueRef max_size = bcx.build.Alloca(T_int());
1010 bcx.build.Store(C_int(0), max_size);
1012 auto variants = tag_variants(bcx.fcx.ccx, tid);
1013 for (ast.variant variant in variants) {
1014 let vec[@ty.t] tys = variant_types(bcx.fcx.ccx, variant);
1015 auto rslt = align_elements(bcx, tys);
1018 auto this_size = rslt.val;
1019 auto old_max_size = bcx.build.Load(max_size);
1020 bcx.build.Store(umax(bcx, this_size, old_max_size), max_size);
1023 auto max_size_val = bcx.build.Load(max_size);
1024 auto total_size = bcx.build.Add(max_size_val, llsize_of(T_int()));
1025 ret res(bcx, total_size);
1030 fn dynamic_align_of(@block_ctxt cx, @ty.t t) -> result {
1032 case (ty.ty_param(?p)) {
1033 auto aptr = field_of_tydesc(cx, t, abi.tydesc_field_align);
1034 ret res(aptr.bcx, aptr.bcx.build.Load(aptr.val));
1036 case (ty.ty_tup(?elts)) {
1039 for (@ty.t e in elts) {
1040 auto align = align_of(bcx, e);
1042 a = umax(bcx, a, align.val);
1046 case (ty.ty_rec(?flds)) {
1049 for (ty.field f in flds) {
1050 auto align = align_of(bcx, f.ty);
1052 a = umax(bcx, a, align.val);
1059 // Replacement for the LLVM 'GEP' instruction when field-indexing into a
1060 // tuple-like structure (tup, rec) with a static index. This one is driven off
1061 // ty.struct and knows what to do when it runs into a ty_param stuck in the
1062 // middle of the thing it's GEP'ing into. Much like size_of and align_of,
// Static-path / dynamic-path field access into a tup-like value (see the
// explanatory comment block preceding this fn). When the type's size is
// static, it emits a plain GEP over the index vector. Otherwise it
// splits the type at the index path (split_type), measures the flattened
// prefix as a fake tuple type, bumps the base pointer (as i8*) by that
// size, and casts to a pointer to the target type — unless the target is
// itself dynamically sized, in which case the raw bumped pointer is
// returned.
1065 fn GEP_tup_like(@block_ctxt cx, @ty.t t,
1066 ValueRef base, vec[int] ixs) -> result {
1068 check (ty.type_is_tup_like(t));
1070 // It might be a static-known type. Handle this.
1072 if (! ty.type_has_dynamic_size(t)) {
1073 let vec[ValueRef] v = vec();
1074 for (int i in ixs) {
1077 ret res(cx, cx.build.GEP(base, v));
1080 // It is a dynamic-containing type that, if we convert directly to an LLVM
1081 // TypeRef, will be all wrong; there's no proper LLVM type to represent
1082 // it, and the lowering function will stick in i8* values for each
1083 // ty_param, which is not right; the ty_params are all of some dynamic
1086 // What we must do instead is sadder. We must look through the indices
1087 // manually and split the input type into a prefix and a target. We then
1088 // measure the prefix size, bump the input pointer by that amount, and
1089 // cast to a pointer-to-target type.
1092 // Given a type, an index vector and an element number N in that vector,
1093 // calculate index X and the type that results by taking the first X-1
1094 // elements of the type and splitting the Xth off. Return the prefix as
1095 // well as the innermost Xth type.
1097 fn split_type(@ty.t t, vec[int] ixs, uint n)
1098 -> rec(vec[@ty.t] prefix, @ty.t target) {
1100 let uint len = _vec.len[int](ixs);
1102 // We don't support 0-index or 1-index GEPs. The former is nonsense
1103 // and the latter would only be meaningful if we supported non-0
1104 // values for the 0th index (we don't).
1109 // Since we're starting from a value that's a pointer to a
1110 // *single* structure, the first index (in GEP-ese) should just be
1111 // 0, to yield the pointee.
1112 check (ixs.(n) == 0);
1113 ret split_type(t, ixs, n+1u);
1118 let int ix = ixs.(n);
1119 let vec[@ty.t] prefix = vec();
1122 append[@ty.t](prefix, ty.get_element_type(t, i as uint));
1126 auto selected = ty.get_element_type(t, i as uint);
1129 // We are at the innermost index.
1130 ret rec(prefix=prefix, target=selected);
1133 // Not the innermost index; call self recursively to dig deeper.
1134 // Once we get an inner result, append it current prefix and
1135 // return to caller.
1136 auto inner = split_type(selected, ixs, n+1u);
1137 prefix += inner.prefix;
1138 ret rec(prefix=prefix with inner);
1142 // We make a fake prefix tuple-type here; luckily for measuring sizes
1143 // the tuple parens are associative so it doesn't matter that we've
1144 // flattened the incoming structure.
1146 auto s = split_type(t, ixs, 0u);
1147 auto prefix_ty = plain_ty(ty.ty_tup(s.prefix));
1149 auto sz = size_of(bcx, prefix_ty);
1151 auto raw = bcx.build.PointerCast(base, T_ptr(T_i8()));
1152 auto bumped = bcx.build.GEP(raw, vec(sz.val));
1154 if (ty.type_has_dynamic_size(s.target)) {
1155 ret res(bcx, bumped);
1158 auto typ = T_ptr(type_of(bcx.fcx.ccx, s.target));
1159 ret res(bcx, bcx.build.PointerCast(bumped, typ));
1162 // Replacement for the LLVM 'GEP' instruction when field indexing into a tag.
1163 // This function uses GEP_tup_like() above and automatically performs casts as
1164 // appropriate. @llblobptr is the data part of a tag value; its actual type is
1165 // meaningless, as it will be cast away.
// Field-indexes into a tag variant's payload (the "blob" part of a tag
// value): synthesizes a tuple type from the variant's argument types,
// casts the untyped blob pointer to that tuple's lowered type (only when
// statically sized), delegates the actual indexing to GEP_tup_like with
// path (0, ix), then casts the resulting element pointer to the
// element's lowered type when that is statically sized.
// NOTE(review): the loop body that selects `elem_ty` from true_arg_tys
// at index ix, the else-arm assigning `val` on the dynamic path, and the
// declaration of `val` are elided in this excerpt.
1166 fn GEP_tag(@block_ctxt cx, ValueRef llblobptr, &ast.variant variant, int ix)
1168 // Synthesize a tuple type so that GEP_tup_like() can work its magic.
1169 // Separately, store the type of the element we're interested in.
1170 auto arg_tys = arg_tys_of_fn(variant.ann);
1171 auto elem_ty = ty.plain_ty(ty.ty_nil); // typestate infelicity
1173 let vec[@ty.t] true_arg_tys = vec();
1174 for (ty.arg a in arg_tys) {
1175 true_arg_tys += vec(a.ty);
1182 auto tup_ty = ty.plain_ty(ty.ty_tup(true_arg_tys));
1184 // Cast the blob pointer to the appropriate type, if we need to (i.e. if
1185 // the blob pointer isn't dynamically sized).
1186 let ValueRef llunionptr;
1187 if (!ty.type_has_dynamic_size(tup_ty)) {
1188 auto llty = type_of(cx.fcx.ccx, tup_ty);
1189 llunionptr = cx.build.TruncOrBitCast(llblobptr, T_ptr(llty));
1191 llunionptr = llblobptr;
1194 // Do the GEP_tup_like().
1195 auto rslt = GEP_tup_like(cx, tup_ty, llunionptr, vec(0, ix));
1197 // Cast the result to the appropriate type, if necessary.
1199 if (!ty.type_has_dynamic_size(elem_ty)) {
1200 auto llelemty = type_of(rslt.bcx.fcx.ccx, elem_ty);
1201 val = rslt.bcx.build.PointerCast(rslt.val, T_ptr(llelemty));
1206 ret res(rslt.bcx, val);
1210 fn trans_raw_malloc(@block_ctxt cx, TypeRef llptr_ty, ValueRef llsize)
1212 // FIXME: need a table to collect tydesc globals.
1213 auto tydesc = C_int(0);
1214 auto rslt = trans_upcall(cx, "upcall_malloc", vec(llsize, tydesc));
1215 rslt = res(rslt.bcx, vi2p(cx, rslt.val, llptr_ty));
// Allocates a refcounted box for a value of type t: measures a
// synthesized tup(int, t) — refcount word followed by the body — and
// raw-mallocs that many bytes, returning the pointer typed as the
// lowered box-pointer type ty_box(t).
1219 fn trans_malloc_boxed(@block_ctxt cx, @ty.t t) -> result {
1220 // Synthesize a fake box type structurally so we have something
1221 // to measure the size of.
1222 auto boxed_body = plain_ty(ty.ty_tup(vec(plain_ty(ty.ty_int), t)));
1223 auto box_ptr = plain_ty(ty.ty_box(t));
1224 auto sz = size_of(cx, boxed_body);
1225 auto llty = type_of(cx.fcx.ccx, box_ptr);
1226 ret trans_raw_malloc(sz.bcx, llty, sz.val);
1230 // Type descriptor and type glue stuff
1232 // Given a type and a field index into its corresponding type descriptor,
1233 // returns an LLVM ValueRef of that field from the tydesc, generating the
1234 // tydesc if necessary.
1235 fn field_of_tydesc(@block_ctxt cx, @ty.t t, int field) -> result {
1236 auto tydesc = get_tydesc(cx, t);
1238 tydesc.bcx.build.GEP(tydesc.val, vec(C_int(0), C_int(field))));
1241 // Given a type containing ty params, build a vector containing a ValueRef for
1242 // each of the ty params it uses (from the current frame), as well as a vec
1243 // containing a def_id for each such param. This is used solely for
1244 // constructing derived tydescs.
// Walks type t with a folder object, collecting — for each distinct
// ty_param it uses — the param's def_id and the corresponding tydesc
// ValueRef from the current frame (fcx.lltydescs). Returns the parallel
// (def_ids, values) vectors; used solely for constructing derived
// tydescs (see the comment block preceding this fn).
1245 fn linearize_ty_params(@block_ctxt cx, @ty.t t)
1246 -> tup(vec[ast.def_id], vec[ValueRef]) {
1247 let vec[ValueRef] param_vals = vec();
1248 let vec[ast.def_id] param_defs = vec();
1249 type rr = rec(@block_ctxt cx,
1250 mutable vec[ValueRef] vals,
1251 mutable vec[ast.def_id] defs);
1253 state obj folder(@rr r) {
// Records each ty_param's tydesc the first time it is seen; the `seen`
// scan over r.defs deduplicates repeat uses of the same param.
// NOTE(review): the dedup-check body and the defs update are elided in
// this excerpt.
1254 fn fold_simple_ty(@ty.t t) -> @ty.t {
1256 case (ty.ty_param(?pid)) {
1257 let bool seen = false;
1258 for (ast.def_id d in r.defs) {
1264 r.vals += r.cx.fcx.lltydescs.get(pid);
1275 auto x = @rec(cx = cx,
1276 mutable vals = param_vals,
1277 mutable defs = param_defs);
1279 ty.fold_ty(folder(x), t);
1281 ret tup(x.defs, x.vals);
// Returns (the result holding) a pointer to the tydesc for type t.
// Three cases:
//  1. t is itself a ty_param — return the tydesc passed in through the
//     frame (fcx.lltydescs).
//  2. t contains ty_params — declare/define a "root" tydesc for t if not
//     cached, stack-allocate an array holding the root followed by the
//     per-param tydescs, and upcall into the runtime
//     (upcall_get_type_desc) to obtain a derived tydesc.
//  3. t is fully concrete — declare/define it once and return the cached
//     global from ccx.tydescs.
// NOTE(review): the loop header filling the array, the `i` counter
// updates, and parts of the upcall argument list (size/align values) are
// elided in this excerpt.
1284 fn get_tydesc(&@block_ctxt cx, @ty.t t) -> result {
1285 // Is the supplied type a type param? If so, return the passed-in tydesc.
1286 alt (ty.type_param(t)) {
1287 case (some[ast.def_id](?id)) {
1288 check (cx.fcx.lltydescs.contains_key(id));
1289 ret res(cx, cx.fcx.lltydescs.get(id));
1291 case (none[ast.def_id]) { /* fall through */ }
1294 // Does it contain a type param? If so, generate a derived tydesc.
1295 let uint n_params = ty.count_ty_params(t);
1297 if (ty.count_ty_params(t) > 0u) {
1298 auto tys = linearize_ty_params(cx, t);
1300 check (n_params == _vec.len[ast.def_id](tys._0));
1301 check (n_params == _vec.len[ValueRef](tys._1));
1303 if (!cx.fcx.ccx.tydescs.contains_key(t)) {
1304 declare_tydesc(cx.fcx.ccx, t);
1305 define_tydesc(cx.fcx.ccx, t, tys._0);
1308 auto root = cx.fcx.ccx.tydescs.get(t).tydesc;
1310 auto tydescs = cx.build.Alloca(T_array(T_ptr(T_tydesc(cx.fcx.ccx.tn)),
1314 auto tdp = cx.build.GEP(tydescs, vec(C_int(0), C_int(i)));
1315 cx.build.Store(root, tdp);
1317 for (ValueRef td in tys._1) {
1318 auto tdp = cx.build.GEP(tydescs, vec(C_int(0), C_int(i)));
1319 cx.build.Store(td, tdp);
1324 auto sz = size_of(bcx, t);
1326 auto align = align_of(bcx, t);
1329 auto v = trans_upcall(bcx, "upcall_get_type_desc",
1330 vec(p2i(bcx.fcx.ccx.crate_ptr),
1333 C_int((1u + n_params) as int),
1334 vp2i(bcx, tydescs)));
1336 ret res(v.bcx, vi2p(v.bcx, v.val,
1337 T_ptr(T_tydesc(cx.fcx.ccx.tn))));
1340 // Otherwise, generate a tydesc if necessary, and return it.
1341 if (!cx.fcx.ccx.tydescs.contains_key(t)) {
1342 let vec[ast.def_id] defs = vec();
1343 declare_tydesc(cx.fcx.ccx, t);
1344 define_tydesc(cx.fcx.ccx, t, defs);
1346 ret res(cx, cx.fcx.ccx.tydescs.get(t).tydesc);
1349 // Generates the declaration for (but doesn't fill in) a type descriptor. This
1350 // needs to be separate from make_tydesc() below, because sometimes type glue
1351 // functions needs to refer to their own type descriptors.
//
// Declares the take/drop glue functions for `t`, builds the constant tydesc
// global (size/align plus glue-offset fields), and records the resulting
// tydesc_info in cx.tydescs. The glue bodies are filled in later by
// define_tydesc(). NOTE(review): excerpt is line-sampled; the declarations
// of llsize/llalign and the tail of the record literal are elided here.
1352 fn declare_tydesc(@crate_ctxt cx, @ty.t t) {
1353 auto take_glue = declare_generic_glue(cx, t, "take");
1354 auto drop_glue = declare_generic_glue(cx, t, "drop");
// Statically-sized types get compile-time size/align constants.
1358 if (!ty.type_has_dynamic_size(t)) {
1359 auto llty = type_of(cx, t);
1360 llsize = llsize_of(llty);
1361 llalign = llalign_of(llty);
1363 // These will be overwritten as the derived tydesc is generated, so
1364 // we create placeholder values.
1369 auto glue_fn_ty = T_ptr(T_glue_fn(cx.tn));
1371 // FIXME: this adjustment has to do with the ridiculous encoding of
1372 // glue-pointer-constants in the tydesc records: They are tydesc-relative
1373 // displacements. This is purely for compatibility with rustboot and
1374 // should go when it is discarded.
// off() computes gluefn - tydescp as an LLVM constant expression.
1375 fn off(ValueRef tydescp,
1376 ValueRef gluefn) -> ValueRef {
1377 ret i2p(llvm.LLVMConstSub(p2i(gluefn), p2i(tydescp)),
1381 auto name = sanitize(cx.names.next("tydesc_" + ty.ty_to_str(t)));
1382 auto gvar = llvm.LLVMAddGlobal(cx.llmod, T_tydesc(cx.tn),
// Field order must match the runtime's tydesc layout (see abi).
1384 auto tydesc = C_struct(vec(C_null(T_ptr(T_ptr(T_tydesc(cx.tn)))),
1387 off(gvar, take_glue), // take_glue_off
1388 off(gvar, drop_glue), // drop_glue_off
1389 C_null(glue_fn_ty), // free_glue_off
1390 C_null(glue_fn_ty), // sever_glue_off
1391 C_null(glue_fn_ty), // mark_glue_off
1392 C_null(glue_fn_ty), // obj_drop_glue_off
1393 C_null(glue_fn_ty))); // is_stateful
1395 llvm.LLVMSetInitializer(gvar, tydesc);
1396 llvm.LLVMSetGlobalConstant(gvar, True);
1397 llvm.LLVMSetLinkage(gvar, lib.llvm.LLVMPrivateLinkage
1402 take_glue=take_glue,
// Cache so get_tydesc() and the glue bodies can find this record.
1406 cx.tydescs.insert(t, @info);
1409 // declare_tydesc() above must have been called first.
//
// Fills in the bodies of the take/drop glue functions that
// declare_tydesc() only declared, threading `typaram_defs` through so the
// glue can bind incoming tydesc parameters to the right def_ids.
1410 fn define_tydesc(@crate_ctxt cx, @ty.t t, vec[ast.def_id] typaram_defs) {
1411 auto info = cx.tydescs.get(t);
1412 auto gvar = info.tydesc;
// Bind the helper fns to locals first; the dialect requires a named
// value to pass a fn item as a first-class argument here.
1414 auto tg = make_take_glue;
1415 auto take_glue = make_generic_glue(cx, t, info.take_glue, tg,
1417 auto dg = make_drop_glue;
1418 auto drop_glue = make_generic_glue(cx, t, info.drop_glue, dg,
// Declares (without defining) one glue function for type `t`, named
// "_rust_<name><sep><type-string>" after sanitizing, using the common
// T_glue_fn signature and fastcall convention.
1422 fn declare_generic_glue(@crate_ctxt cx, @ty.t t, str name) -> ValueRef {
1423 auto llfnty = T_glue_fn(cx.tn);
1425 auto fn_name = cx.names.next("_rust_" + name) + sep() + ty.ty_to_str(t);
1426 fn_name = sanitize(fn_name);
1427 ret decl_fastcall_fn(cx.llmod, fn_name, llfnty);
// Emits the body of a glue function `llfn` for type `t`: sets up a fresh
// fn/block context, binds the incoming tydesc array (param 3) to the
// def_ids in `typaram_defs`, casts the raw value pointer (param 4) to the
// appropriate LLVM type, and delegates the per-type work to `helper`
// (make_take_glue / make_drop_glue). NOTE(review): excerpt is line-sampled;
// declarations of llty/p/re and some control flow are elided.
1430 fn make_generic_glue(@crate_ctxt cx, @ty.t t, ValueRef llfn,
1431 val_and_ty_fn helper,
1432 vec[ast.def_id] typaram_defs) -> ValueRef {
1433 auto fcx = new_fn_ctxt(cx, llfn);
1434 auto bcx = new_top_block_ctxt(fcx);
// Scalars need no glue body; only non-scalars get the cast+helper path.
1437 if (!ty.type_is_scalar(t)) {
// Dynamically-sized values are passed as opaque i8*; structural
// statically-sized values by pointer; everything else by value type.
1439 if (ty.type_has_dynamic_size(t)) {
1440 llty = T_ptr(T_i8());
1441 } else if (ty.type_is_structural(t)) {
1442 llty = T_ptr(type_of(cx, t));
1444 llty = type_of(cx, t);
// Param 3 is the array of caller-supplied tydescs; load each one and
// register it under its def_id so get_tydesc() can find it.
1447 auto lltyparams = llvm.LLVMGetParam(llfn, 3u);
1449 for (ast.def_id d in typaram_defs) {
1450 auto llparam = bcx.build.GEP(lltyparams, vec(C_int(p)));
1451 llparam = bcx.build.Load(llparam);
1452 bcx.fcx.lltydescs.insert(d, llparam);
// Param 4 is the raw pointer to the value the glue operates on.
1456 auto llrawptr = llvm.LLVMGetParam(llfn, 4u);
1457 auto llval = bcx.build.BitCast(llrawptr, llty);
1459 re = helper(bcx, llval, t);
1461 re = res(bcx, C_nil());
// Glue functions return void.
1464 re.bcx.build.RetVoid();
// Take-glue body: boxed values get their refcount bumped; structural
// values recursively take each element; everything else is a no-op.
1468 fn make_take_glue(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
1469 if (ty.type_is_boxed(t)) {
1470 ret incr_refcnt_of_boxed(cx, v);
1472 } else if (ty.type_is_structural(t)) {
1473 ret iter_structural_ty(cx, v, t,
1474 bind incr_all_refcnts(_, _, _));
1476 ret res(cx, C_nil());
// Increments the refcount of a boxed value, unless the count equals the
// "constant" sentinel (abi.const_refcount), which marks the box as
// statically allocated and never collected.
1479 fn incr_refcnt_of_boxed(@block_ctxt cx, ValueRef box_ptr) -> result {
1480 auto rc_ptr = cx.build.GEP(box_ptr, vec(C_int(0),
1481 C_int(abi.box_rc_field_refcnt)));
1482 auto rc = cx.build.Load(rc_ptr);
1484 auto rc_adj_cx = new_sub_block_ctxt(cx, "rc++");
1485 auto next_cx = new_sub_block_ctxt(cx, "next");
// Skip the increment entirely for const (immortal) boxes.
1487 auto const_test = cx.build.ICmp(lib.llvm.LLVMIntEQ,
1488 C_int(abi.const_refcount as int), rc);
1489 cx.build.CondBr(const_test, next_cx.llbb, rc_adj_cx.llbb);
1491 rc = rc_adj_cx.build.Add(rc, C_int(1));
1492 rc_adj_cx.build.Store(rc, rc_ptr);
1493 rc_adj_cx.build.Br(next_cx.llbb);
1495 ret res(next_cx, C_nil());
// Drop-glue body: dispatches on the structure of `t`. Refcounted types
// (str/vec/box/obj/fn-closure) decrement and, on reaching zero, run a
// per-type `hit_zero` cleanup before freeing; plain structural types
// recursively drop their elements; scalar/native/nil types are no-ops.
// NOTE(review): excerpt is line-sampled; the enclosing alt and several
// GEP argument lists are partially elided.
1498 fn make_drop_glue(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
// (Elided case, presumably ty_str:) free the box when the count hits
// zero; no element glue needed for bytes.
1501 ret decr_refcnt_and_if_zero
1502 (cx, v, bind trans_non_gc_free(_, v),
1507 case (ty.ty_vec(_)) {
// On zero: drop every element, then free the vec allocation.
1508 fn hit_zero(@block_ctxt cx, ValueRef v,
1509 @ty.t t) -> result {
1510 auto res = iter_sequence(cx, v, t,
1511 bind drop_ty(_,_,_));
1512 // FIXME: switch gc/non-gc on layer of the type.
1513 ret trans_non_gc_free(res.bcx, v);
1515 ret decr_refcnt_and_if_zero(cx, v,
1516 bind hit_zero(_, v, t),
1521 case (ty.ty_box(?body_ty)) {
// On zero: drop the boxed body, then free the box itself.
1522 fn hit_zero(@block_ctxt cx, ValueRef v,
1523 @ty.t body_ty) -> result {
1524 auto body = cx.build.GEP(v,
1526 C_int(abi.box_rc_field_body)));
1528 auto body_val = load_scalar_or_boxed(cx, body, body_ty);
1529 auto res = drop_ty(cx, body_val, body_ty);
1530 // FIXME: switch gc/non-gc on layer of the type.
1531 ret trans_non_gc_free(res.bcx, v);
1533 ret decr_refcnt_and_if_zero(cx, v,
1534 bind hit_zero(_, v, body_ty),
1539 case (ty.ty_obj(_)) {
1540 fn hit_zero(@block_ctxt cx, ValueRef v) -> result {
1542 // Call through the obj's own fields-drop glue first.
1546 C_int(abi.box_rc_field_body)));
1551 C_int(abi.obj_body_elt_tydesc)));
// The obj body carries its own tydesc; invoke its drop glue
// through the tydesc's drop_glue_off slot.
1553 call_tydesc_glue_full(cx, body, cx.build.Load(tydescptr),
1554 abi.tydesc_field_drop_glue_off);
1556 // Then free the body.
1557 // FIXME: switch gc/non-gc on layer of the type.
1558 ret trans_non_gc_free(cx, v);
// An obj value is a (vtbl, box) pair; drop acts on the box field.
1563 C_int(abi.obj_field_box)));
1565 auto boxptr = cx.build.Load(box_cell);
1567 ret decr_refcnt_and_if_zero(cx, boxptr,
1568 bind hit_zero(_, boxptr),
1573 case (ty.ty_fn(_,_,_)) {
1574 fn hit_zero(@block_ctxt cx, ValueRef v) -> result {
1576 // Call through the closure's own fields-drop glue first.
1580 C_int(abi.box_rc_field_body)));
1584 C_int(abi.closure_elt_bindings)));
1589 C_int(abi.closure_elt_tydesc)));
1591 call_tydesc_glue_full(cx, bindings, cx.build.Load(tydescptr),
1592 abi.tydesc_field_drop_glue_off);
1595 // Then free the body.
1596 // FIXME: switch gc/non-gc on layer of the type.
1597 ret trans_non_gc_free(cx, v);
// A fn value is a (code, box) pair; drop acts on the box field.
1602 C_int(abi.fn_field_box)));
1604 auto boxptr = cx.build.Load(box_cell);
1606 ret decr_refcnt_and_if_zero(cx, boxptr,
1607 bind hit_zero(_, boxptr),
// Fallback arm: non-refcounted types.
1613 if (ty.type_is_structural(t)) {
1614 ret iter_structural_ty(cx, v, t,
1615 bind drop_ty(_, _, _));
1617 } else if (ty.type_is_scalar(t) ||
1618 ty.type_is_native(t) ||
1619 ty.type_is_nil(t)) {
1620 ret res(cx, C_nil());
1624 cx.fcx.ccx.sess.bug("bad type in trans.make_drop_glue_inner: " +
// Emits the standard refcount-decrement skeleton around `box_ptr`:
//   null box        -> skip everything
//   const refcount  -> skip (immortal box, see abi.const_refcount)
//   rc-1 == 0       -> run `inner` (the type-specific destructor)
//   otherwise       -> just store the decremented count
// All paths merge at `next_cx`; the result value is a phi over
// `v_else` (the non-destroying paths) and `inner`'s result.
// NOTE(review): excerpt is line-sampled; parameter list (box_ptr,
// inner_name) and two phi incoming blocks are elided here.
1629 fn decr_refcnt_and_if_zero(@block_ctxt cx,
1631 fn(@block_ctxt cx) -> result inner,
1633 TypeRef t_else, ValueRef v_else) -> result {
1635 auto load_rc_cx = new_sub_block_ctxt(cx, "load rc");
1636 auto rc_adj_cx = new_sub_block_ctxt(cx, "rc--");
1637 auto inner_cx = new_sub_block_ctxt(cx, inner_name);
1638 auto next_cx = new_sub_block_ctxt(cx, "next");
// Null boxes are legal (e.g. default-initialized slots); do nothing.
1640 auto null_test = cx.build.IsNull(box_ptr);
1641 cx.build.CondBr(null_test, next_cx.llbb, load_rc_cx.llbb);
1644 auto rc_ptr = load_rc_cx.build.GEP(box_ptr,
1646 C_int(abi.box_rc_field_refcnt)));
1648 auto rc = load_rc_cx.build.Load(rc_ptr);
1650 load_rc_cx.build.ICmp(lib.llvm.LLVMIntEQ,
1651 C_int(abi.const_refcount as int), rc);
1652 load_rc_cx.build.CondBr(const_test, next_cx.llbb, rc_adj_cx.llbb);
1654 rc = rc_adj_cx.build.Sub(rc, C_int(1));
1655 rc_adj_cx.build.Store(rc, rc_ptr);
1656 auto zero_test = rc_adj_cx.build.ICmp(lib.llvm.LLVMIntEQ, C_int(0), rc);
1657 rc_adj_cx.build.CondBr(zero_test, inner_cx.llbb, next_cx.llbb);
1659 auto inner_res = inner(inner_cx);
1660 inner_res.bcx.build.Br(next_cx.llbb);
// Three v_else entries correspond to the three non-destroying
// predecessors (null, const, nonzero-after-decrement).
1662 auto phi = next_cx.build.Phi(t_else,
1663 vec(v_else, v_else, v_else, inner_res.val),
1667 inner_res.bcx.llbb));
1669 ret res(next_cx, phi);
// Returns the argument types of tag variant `v`. A variant with arguments
// has fn type (its constructor); a nullary variant has tag type and
// contributes no argument types.
1674 fn variant_types(@crate_ctxt cx, &ast.variant v) -> vec[@ty.t] {
1675 let vec[@ty.t] tys = vec();
1676 alt (ty.ann_to_type(v.ann).struct) {
1677 case (ty.ty_fn(_, ?args, _)) {
// Collect each constructor argument's type (body elided in excerpt).
1678 for (ty.arg arg in args) {
1682 case (ty.ty_tag(_, _)) { /* nothing */ }
// Computes the LLVM struct type holding the payload of tag variant `v`:
// one field per constructor argument, in declaration order.
1688 fn type_of_variant(@crate_ctxt cx, &ast.variant v) -> TypeRef {
1689 let vec[TypeRef] lltys = vec();
1690 auto tys = variant_types(cx, v);
1691 for (@ty.t typ in tys) {
1692 lltys += vec(type_of(cx, typ));
1694 ret T_struct(lltys);
1697 // Returns the type parameters of a tag.
//
// Looks `id` up in the crate's item table; asserts the item exists and
// is an item_tag. The trailing `fail` is unreachable because the alt
// arm always returns.
1698 fn tag_ty_params(@crate_ctxt cx, ast.def_id id) -> vec[ast.ty_param] {
1699 check (cx.items.contains_key(id));
1700 alt (cx.items.get(id).node) {
1701 case (ast.item_tag(_, _, ?tps, _)) { ret tps; }
1703 fail; // not reached
1706 // Returns the variants in a tag.
//
// Mirrors tag_ty_params() above: asserts `id` names a known item_tag and
// extracts its variant list.
1707 fn tag_variants(@crate_ctxt cx, ast.def_id id) -> vec[ast.variant] {
1708 check (cx.items.contains_key(id));
1709 alt (cx.items.get(id).node) {
1710 case (ast.item_tag(_, ?variants, _, _)) { ret variants; }
1712 fail; // not reached
1715 // Returns a new plain tag type of the given ID with no type parameters. Don't
1716 // use this function in new code; it's a hack to keep things working for now.
1717 fn mk_plain_tag(ast.def_id tid) -> @ty.t {
// Empty parameter vector: monomorphic view of the tag.
1718 let vec[@ty.t] tps = vec();
1719 ret ty.plain_ty(ty.ty_tag(tid, tps));
// Callback signatures used by the iteration helpers below:
// val_fn          — operate on one value.
// val_and_ty_fn   — operate on one value plus its type.
// val_pair_and_ty_fn — operate on a pair of parallel values (used for
// structural comparison/copy walks) plus their common type.
1723 type val_fn = fn(@block_ctxt cx, ValueRef v) -> result;
1725 type val_and_ty_fn = fn(@block_ctxt cx, ValueRef v, @ty.t t) -> result;
1727 type val_pair_and_ty_fn =
1728 fn(@block_ctxt cx, ValueRef av, ValueRef bv, @ty.t t) -> result;
1730 // Iterates through the elements of a structural type.
//
// Single-value wrapper over iter_structural_ty_full(): passes `v` as both
// sides of the pair and adapts the single-value callback `f` to the
// pair-callback shape. NOTE(review): parameter list is partially elided
// in this excerpt.
1731 fn iter_structural_ty(@block_ctxt cx,
1736 fn adaptor_fn(val_and_ty_fn f,
1740 @ty.t t) -> result {
// Tail-call (`be`) into the full pairwise walker.
1743 be iter_structural_ty_full(cx, v, v, t,
1744 bind adaptor_fn(f, _, _, _, _));
// Walks two parallel values `av`/`bv` of structural type `t`, invoking `f`
// on each corresponding element pair. Handles tup, rec, tag (with a
// discriminant switch and per-variant payload casts), and the box cells of
// fn/obj values. Used for pairwise operations such as structural
// comparison; the single-value form above reuses it with av == bv.
// NOTE(review): excerpt is line-sampled; loop-counter bookkeeping, some
// GEP argument lists, and several closing braces are elided.
1748 fn iter_structural_ty_full(@block_ctxt cx,
1752 val_pair_and_ty_fn f)
1754 let result r = res(cx, C_nil());
// Shared helper for fn/obj: load both box pointers and apply `f` to
// them as a box-of-nil pair, skipping entirely when side A is null.
1756 fn iter_boxpp(@block_ctxt cx,
1757 ValueRef box_a_cell,
1758 ValueRef box_b_cell,
1759 val_pair_and_ty_fn f) -> result {
1760 auto box_a_ptr = cx.build.Load(box_a_cell);
1761 auto box_b_ptr = cx.build.Load(box_b_cell);
1762 auto tnil = plain_ty(ty.ty_nil);
1763 auto tbox = plain_ty(ty.ty_box(tnil));
1765 auto inner_cx = new_sub_block_ctxt(cx, "iter box");
1766 auto next_cx = new_sub_block_ctxt(cx, "next");
1767 auto null_test = cx.build.IsNull(box_a_ptr);
1768 cx.build.CondBr(null_test, next_cx.llbb, inner_cx.llbb);
1770 auto r = f(inner_cx, box_a_ptr, box_b_ptr, tbox);
1771 r.bcx.build.Br(next_cx.llbb);
1772 ret res(next_cx, r.val);
1776 case (ty.ty_tup(?args)) {
// Visit each tuple slot in both values via GEP_tup_like.
1778 for (@ty.t arg in args) {
1779 r = GEP_tup_like(r.bcx, t, av, vec(0, i));
1781 r = GEP_tup_like(r.bcx, t, bv, vec(0, i));
1784 load_scalar_or_boxed(r.bcx, elt_a, arg),
1785 load_scalar_or_boxed(r.bcx, elt_b, arg),
1790 case (ty.ty_rec(?fields)) {
// Same walk as ty_tup, keyed by record field order.
1792 for (ty.field fld in fields) {
1793 r = GEP_tup_like(r.bcx, t, av, vec(0, i));
1794 auto llfld_a = r.val;
1795 r = GEP_tup_like(r.bcx, t, bv, vec(0, i));
1796 auto llfld_b = r.val;
1798 load_scalar_or_boxed(r.bcx, llfld_a, fld.ty),
1799 load_scalar_or_boxed(r.bcx, llfld_b, fld.ty),
1804 case (ty.ty_tag(?tid, ?tps)) {
1805 auto variants = tag_variants(cx.fcx.ccx, tid);
1806 auto n_variants = _vec.len[ast.variant](variants);
// Tag layout: field 0 = discriminant, field 1 = payload union.
1808 auto lldiscrim_a_ptr = cx.build.GEP(av, vec(C_int(0), C_int(0)));
1809 auto llunion_a_ptr = cx.build.GEP(av, vec(C_int(0), C_int(1)));
1810 auto lldiscrim_a = cx.build.Load(lldiscrim_a_ptr);
1812 auto lldiscrim_b_ptr = cx.build.GEP(bv, vec(C_int(0), C_int(0)));
1813 auto llunion_b_ptr = cx.build.GEP(bv, vec(C_int(0), C_int(1)));
1814 auto lldiscrim_b = cx.build.Load(lldiscrim_b_ptr);
1816 // NB: we must hit the discriminant first so that structural
1817 // comparison know not to proceed when the discriminants differ.
1819 bcx = f(bcx, lldiscrim_a, lldiscrim_b,
1820 plain_ty(ty.ty_int)).bcx;
// Switch on discriminant A; a value outside the known variants
// is a compiler/runtime bug, hence the unreachable default.
1822 auto unr_cx = new_sub_block_ctxt(bcx, "tag-iter-unr");
1823 unr_cx.build.Unreachable();
1825 auto llswitch = bcx.build.Switch(lldiscrim_a, unr_cx.llbb,
1828 auto next_cx = new_sub_block_ctxt(bcx, "tag-iter-next");
1831 for (ast.variant variant in variants) {
1832 auto variant_cx = new_sub_block_ctxt(bcx,
1833 "tag-iter-variant-" +
1834 _uint.to_str(i, 10u));
1835 llvm.LLVMAddCase(llswitch, C_int(i as int), variant_cx.llbb);
1837 if (_vec.len[ast.variant_arg](variant.args) > 0u) {
// Cast the opaque union pointers to this variant's concrete
// payload struct type, then visit each argument.
1839 auto llvarty = type_of_variant(bcx.fcx.ccx, variants.(i));
1841 auto fn_ty = ty.ann_to_type(variants.(i).ann);
1842 alt (fn_ty.struct) {
1843 case (ty.ty_fn(_, ?args, _)) {
1844 auto llvarp_a = variant_cx.build.
1845 TruncOrBitCast(llunion_a_ptr, T_ptr(llvarty));
1847 auto llvarp_b = variant_cx.build.
1848 TruncOrBitCast(llunion_b_ptr, T_ptr(llvarty));
1850 auto ty_params = tag_ty_params(bcx.fcx.ccx, tid);
1853 for (ty.arg a in args) {
1854 auto v = vec(C_int(0), C_int(j as int));
1857 variant_cx.build.GEP(llvarp_a, v);
1860 variant_cx.build.GEP(llvarp_b, v);
// Substitute the tag's actual type arguments into the
// formal argument type before visiting.
1862 auto ty_subst = ty.substitute_ty_params(
1863 ty_params, tps, a.ty);
1866 load_scalar_or_boxed(variant_cx,
1871 load_scalar_or_boxed(variant_cx,
1875 auto res = f(variant_cx,
1876 llfld_a, llfld_b, ty_subst);
1877 variant_cx = res.bcx;
1884 variant_cx.build.Br(next_cx.llbb);
1886 // Nullary variant; nothing to do.
1887 variant_cx.build.Br(next_cx.llbb);
1893 ret res(next_cx, C_nil());
1895 case (ty.ty_fn(_,_,_)) {
// fn values: iterate the environment boxes of both closures.
1899 C_int(abi.fn_field_box)));
1903 C_int(abi.fn_field_box)));
1904 ret iter_boxpp(cx, box_cell_a, box_cell_b, f);
1906 case (ty.ty_obj(_)) {
// obj values: same treatment via the obj box field.
1910 C_int(abi.obj_field_box)));
1914 C_int(abi.obj_field_box)));
1915 ret iter_boxpp(cx, box_cell_a, box_cell_b, f);
1918 cx.fcx.ccx.sess.unimpl("type in iter_structural_ty_full");
1924 // Iterates through a pointer range, until the src* hits the src_lim*.
//
// Emits a classic while-loop over raw element pointers: pointers are
// converted to integers, a phi tracks the cursor, and `f` is invoked with
// the current element pointer (as i8*) each iteration, stepping by
// `elt_sz` bytes. NOTE(review): parameter `elt_sz` and local `bcx` setup
// are partially elided in this excerpt.
1925 fn iter_sequence_raw(@block_ctxt cx,
1926 ValueRef src, // elt*
1927 ValueRef src_lim, // elt*
1929 val_fn f) -> result {
// Work in integer space so the cursor can be advanced by a dynamic
// byte count (elt_sz may be a runtime value).
1933 let ValueRef src_int = vp2i(bcx, src);
1934 let ValueRef src_lim_int = vp2i(bcx, src_lim);
1936 auto cond_cx = new_scope_block_ctxt(cx, "sequence-iter cond");
1937 auto body_cx = new_scope_block_ctxt(cx, "sequence-iter body");
1938 auto next_cx = new_sub_block_ctxt(cx, "next");
1940 bcx.build.Br(cond_cx.llbb);
// Loop cursor: starts at src; the back-edge value is added below.
1942 let ValueRef src_curr = cond_cx.build.Phi(T_int(),
1943 vec(src_int), vec(bcx.llbb));
1945 auto end_test = cond_cx.build.ICmp(lib.llvm.LLVMIntULT,
1946 src_curr, src_lim_int);
1948 cond_cx.build.CondBr(end_test, body_cx.llbb, next_cx.llbb);
1950 auto src_curr_ptr = vi2p(body_cx, src_curr, T_ptr(T_i8()));
1952 auto body_res = f(body_cx, src_curr_ptr);
1953 body_cx = body_res.bcx;
1955 auto src_next = body_cx.build.Add(src_curr, elt_sz);
1956 body_cx.build.Br(cond_cx.llbb);
// Patch the loop back-edge into the cursor phi.
1958 cond_cx.build.AddIncomingToPhi(src_curr, vec(src_next),
1961 ret res(next_cx, C_nil());
// Typed layer over iter_sequence_raw(): computes the (possibly dynamic)
// element size for `elt_ty`, and wraps `f` so each raw i8* is cast to the
// element's LLVM type and loaded (scalar/boxed) before the callback runs.
1965 fn iter_sequence_inner(@block_ctxt cx,
1966 ValueRef src, // elt*
1967 ValueRef src_lim, // elt*
1969 val_and_ty_fn f) -> result {
1970 fn adaptor_fn(val_and_ty_fn f,
1973 ValueRef v) -> result {
1974 auto llty = type_of(cx.fcx.ccx, elt_ty);
1975 auto p = cx.build.PointerCast(v, T_ptr(llty));
1976 ret f(cx, load_scalar_or_boxed(cx, p, elt_ty), elt_ty);
// size_of may emit code (dynamic sizes), hence threading elt_sz.bcx.
1979 auto elt_sz = size_of(cx, elt_ty);
1980 be iter_sequence_raw(elt_sz.bcx, src, src_lim, elt_sz.val,
1981 bind adaptor_fn(f, elt_ty, _, _));
1985 // Iterates through the elements of a vec or str.
//
// Computes the [data, data+fill) pointer range from the sequence header
// and delegates to iter_sequence_inner(). For strings the trailing NUL
// byte is excluded by shrinking the length by one element.
// NOTE(review): excerpt is line-sampled; parameters (v, f) and the
// enclosing alt on t are partially elided.
1986 fn iter_sequence(@block_ctxt cx,
1989 val_and_ty_fn f) -> result {
1991 fn iter_sequence_body(@block_ctxt cx,
1995 bool trailing_null) -> result {
// vec header: elt_data = payload start, elt_fill = used bytes.
1997 auto p0 = cx.build.GEP(v, vec(C_int(0),
1998 C_int(abi.vec_elt_data)));
1999 auto lenptr = cx.build.GEP(v, vec(C_int(0),
2000 C_int(abi.vec_elt_fill)));
2002 auto llunit_ty = type_of(cx.fcx.ccx, elt_ty);
2005 auto len = bcx.build.Load(lenptr);
2006 if (trailing_null) {
// Drop the terminating NUL from the iteration range (str only).
2007 auto unit_sz = size_of(bcx, elt_ty);
2009 len = bcx.build.Sub(len, unit_sz.val);
2012 auto p1 = vi2p(bcx, bcx.build.Add(vp2i(bcx, p0), len),
2015 ret iter_sequence_inner(cx, p0, p1, elt_ty, f);
2019 case (ty.ty_vec(?et)) {
2020 ret iter_sequence_body(cx, v, et, f, false);
// (str case:) elements are u8 and the trailing NUL is skipped.
2023 auto et = plain_ty(ty.ty_machine(common.ty_u8));
2024 ret iter_sequence_body(cx, v, et, f, true);
2028 cx.fcx.ccx.sess.bug("bad type in trans.iter_sequence");
// Invokes the glue function stored at offset `field` of an explicit
// `tydesc` value on value `v`: loads the tydesc's first-param array and
// the glue pointer, rebases the stored tydesc-relative displacement back
// to an absolute function pointer, and fastcalls it with the standard
// glue argument frame. NOTE(review): excerpt is line-sampled; parts of
// the rebasing arithmetic and call argument list are elided.
2032 fn call_tydesc_glue_full(@block_ctxt cx, ValueRef v,
2033 ValueRef tydesc, int field) {
2034 auto llrawptr = cx.build.BitCast(v, T_ptr(T_i8()));
2035 auto lltydescs = cx.build.GEP(tydesc,
2037 C_int(abi.tydesc_field_first_param)));
2038 lltydescs = cx.build.Load(lltydescs);
2039 auto llfnptr = cx.build.GEP(tydesc, vec(C_int(0), C_int(field)));
2040 auto llfn = cx.build.Load(llfnptr);
2042 // FIXME: this adjustment has to do with the ridiculous encoding of
2043 // glue-pointer-constants in the tydesc records: They are tydesc-relative
2044 // displacements. This is purely for compatibility with rustboot and
2045 // should go when it is discarded.
2046 llfn = vi2p(cx, cx.build.Add(vp2i(cx, llfn),
// Null slots correspond to the unused glue-frame parameters (cf. the
// 5-arg shape consumed in make_generic_glue: params 3 and 4 matter).
2050 cx.build.FastCall(llfn, vec(C_null(T_ptr(T_nil())),
2052 C_null(T_ptr(T_nil())),
// Convenience wrapper: look up (or build) the tydesc for `t`, then call
// the glue at offset `field` on `v` via call_tydesc_glue_full().
2057 fn call_tydesc_glue(@block_ctxt cx, ValueRef v, @ty.t t, int field) {
2058 auto td = get_tydesc(cx, t);
2059 call_tydesc_glue_full(td.bcx, v, td.val, field);
// Runs take-glue on `v` (bumping refcounts throughout the value) unless
// `t` is scalar, which needs no take semantics.
2062 fn incr_all_refcnts(@block_ctxt cx,
2064 @ty.t t) -> result {
2065 if (!ty.type_is_scalar(t)) {
2066 call_tydesc_glue(cx, v, t, abi.tydesc_field_take_glue_off);
2068 ret res(cx, C_nil());
// Drops the value stored in memory slot `slot` (loading it first if it is
// scalar/boxed), then nulls the slot out so a later drop of the same slot
// is a no-op. NOTE(review): the `slot` parameter and trailing `ret` are
// elided in this excerpt.
2071 fn drop_slot(@block_ctxt cx,
2073 @ty.t t) -> result {
2074 auto llptr = load_scalar_or_boxed(cx, slot, t);
2075 auto re = drop_ty(cx, llptr, t);
2077 auto llty = val_ty(slot);
2078 auto llelemty = lib.llvm.llvm.LLVMGetElementType(llty);
// Overwrite with null of the slot's element type to neutralize it.
2079 re.bcx.build.Store(C_null(llelemty), slot);
// Runs drop-glue on value `v` unless `t` is scalar (scalars need no drop).
// Counterpart to incr_all_refcnts() above.
2083 fn drop_ty(@block_ctxt cx,
2085 @ty.t t) -> result {
2087 if (!ty.type_is_scalar(t)) {
2088 call_tydesc_glue(cx, v, t, abi.tydesc_field_drop_glue_off);
2090 ret res(cx, C_nil());
// Copies `n_bytes` from `src` to `dst` by fastcalling the crate's memcpy
// glue; both pointers are normalized to i8* and the count to the native
// int width first. NOTE(review): dst/src parameters are elided in this
// excerpt.
2093 fn call_memcpy(@block_ctxt cx,
2096 ValueRef n_bytes) -> result {
2097 auto src_ptr = cx.build.PointerCast(src, T_ptr(T_i8()));
2098 auto dst_ptr = cx.build.PointerCast(dst, T_ptr(T_i8()));
2099 auto size = cx.build.IntCast(n_bytes, T_int());
2100 ret res(cx, cx.build.FastCall(cx.fcx.ccx.glues.memcpy_glue,
2101 vec(dst_ptr, src_ptr, size)));
// Zeroes `n_bytes` at `dst` via the crate's bzero glue; mirrors
// call_memcpy() above. NOTE(review): the dst parameter is elided in this
// excerpt.
2104 fn call_bzero(@block_ctxt cx,
2106 ValueRef n_bytes) -> result {
2107 auto dst_ptr = cx.build.PointerCast(dst, T_ptr(T_i8()));
2108 auto size = cx.build.IntCast(n_bytes, T_int());
2109 ret res(cx, cx.build.FastCall(cx.fcx.ccx.glues.bzero_glue,
2110 vec(dst_ptr, size)));
// Byte-copies a value of type `t` from `src` to `dst`. Dynamically-sized
// types read their byte size out of the tydesc and memcpy; statically
// sized types fall back to a plain load/store. NOTE(review): dst/src
// parameters are elided in this excerpt.
2113 fn memcpy_ty(@block_ctxt cx,
2116 @ty.t t) -> result {
2117 if (ty.type_has_dynamic_size(t)) {
2118 auto llszptr = field_of_tydesc(cx, t, abi.tydesc_field_size);
2119 auto llsz = llszptr.bcx.build.Load(llszptr.val);
2120 ret call_memcpy(llszptr.bcx, dst, src, llsz);
2123 ret res(cx, cx.build.Store(cx.build.Load(src), dst));
// Copies `src` into `dst` with correct reference-counting semantics.
// Scalars/native store directly; nil is a no-op; boxed values take a new
// reference (and drop the old destination value when `action` is
// DROP_EXISTING); structural or dynamically-sized values take, optionally
// drop, then byte-copy. NOTE(review): the action/dst/src parameters are
// elided in this excerpt.
2132 fn copy_ty(@block_ctxt cx,
2136 @ty.t t) -> result {
2137 if (ty.type_is_scalar(t) || ty.type_is_native(t)) {
2138 ret res(cx, cx.build.Store(src, dst));
2140 } else if (ty.type_is_nil(t)) {
2141 ret res(cx, C_nil());
2143 } else if (ty.type_is_boxed(t)) {
// Take the new reference before dropping the old one, so a
// self-assignment cannot free the value out from under us.
2144 auto r = incr_all_refcnts(cx, src, t);
2145 if (action == DROP_EXISTING) {
2146 r = drop_ty(r.bcx, r.bcx.build.Load(dst), t);
2148 ret res(r.bcx, r.bcx.build.Store(src, dst));
2150 } else if (ty.type_is_structural(t) ||
2151 ty.type_has_dynamic_size(t)) {
2152 auto r = incr_all_refcnts(cx, src, t);
2153 if (action == DROP_EXISTING) {
2154 r = drop_ty(r.bcx, dst, t);
2156 ret memcpy_ty(r.bcx, dst, src, t);
2159 cx.fcx.ccx.sess.bug("unexpected type in trans.copy_ty: " +
// Translates a literal to an LLVM constant. Machine-width integer
// literals select their exact LLVM integer type; the remaining cases map
// straightforwardly onto C_* constant constructors. NOTE(review): excerpt
// is line-sampled; the enclosing alt, the i* arm, and the bodies of the
// bool/nil/str arms are elided.
2164 fn trans_lit(@crate_ctxt cx, &ast.lit lit, &ast.ann ann) -> ValueRef {
2166 case (ast.lit_int(?i)) {
2169 case (ast.lit_uint(?u)) {
2170 ret C_int(u as int);
2172 case (ast.lit_mach_int(?tm, ?i)) {
2173 // FIXME: the entire handling of mach types falls apart
2174 // if target int width is larger than host, at the moment;
2175 // re-do the mach-int types using 'big' when that works.
2178 case (common.ty_u8) { t = T_i8(); }
2179 case (common.ty_u16) { t = T_i16(); }
2180 case (common.ty_u32) { t = T_i32(); }
2181 case (common.ty_u64) { t = T_i64(); }
2183 case (common.ty_i8) { t = T_i8(); }
2184 case (common.ty_i16) { t = T_i16(); }
2185 case (common.ty_i32) { t = T_i32(); }
2186 case (common.ty_i64) { t = T_i64(); }
2188 ret C_integral(i, t);
2190 case (ast.lit_char(?c)) {
2191 ret C_integral(c as int, T_char());
2193 case (ast.lit_bool(?b)) {
2196 case (ast.lit_nil) {
2199 case (ast.lit_str(?s)) {
// Rewrites host-abstract int/uint types into the target's concrete
// machine integer type (from the session's target config); all other
// types pass through unchanged. NOTE(review): the enclosing alt arms are
// partially elided in this excerpt.
2205 fn target_type(@crate_ctxt cx, @ty.t t) -> @ty.t {
2208 auto tm = ty.ty_machine(cx.sess.get_targ_cfg().int_type);
2209 ret @rec(struct=tm with *t);
2212 auto tm = ty.ty_machine(cx.sess.get_targ_cfg().uint_type);
2213 ret @rec(struct=tm with *t);
2215 case (_) { /* fall through */ }
// Extracts the type from an AST node's annotation, target-concretized via
// target_type(). A missing annotation is a typechecker bug.
2220 fn node_ann_type(@crate_ctxt cx, &ast.ann a) -> @ty.t {
2222 case (ast.ann_none) {
2223 cx.sess.bug("missing type annotation");
2225 case (ast.ann_type(?t)) {
2226 ret target_type(cx, t);
// LLVM type of an annotated AST node: node_ann_type() then type_of().
2231 fn node_type(@crate_ctxt cx, &ast.ann a) -> TypeRef {
2232 ret type_of(cx, node_ann_type(cx, a));
// Translates a unary-operator expression. Visible arms: logical/bitwise
// not and negation (after autoderef); box allocation (malloc, refcount=1,
// copy operand into the body, register a cleanup drop); deref (GEP to the
// box body, loading scalars/nil); and `mutable`, which is a no-op
// passthrough. NOTE(review): excerpt is line-sampled; the alt on `op` and
// several locals (e.g. `box`) are elided.
2235 fn trans_unary(@block_ctxt cx, ast.unop op,
2236 @ast.expr e, &ast.ann a) -> result {
2238 auto sub = trans_expr(cx, e);
2242 sub = autoderef(sub.bcx, sub.val, ty.expr_ty(e));
2243 ret res(sub.bcx, cx.build.Not(sub.val));
2246 sub = autoderef(sub.bcx, sub.val, ty.expr_ty(e));
2247 ret res(sub.bcx, cx.build.Not(sub.val));
2250 sub = autoderef(sub.bcx, sub.val, ty.expr_ty(e));
2251 ret res(sub.bcx, cx.build.Neg(sub.val));
// (box arm:) allocate, initialize refcount, copy the operand in.
2254 auto e_ty = ty.expr_ty(e);
2255 auto e_val = sub.val;
2256 auto box_ty = node_ann_type(sub.bcx.fcx.ccx, a);
2257 sub = trans_malloc_boxed(sub.bcx, e_ty);
// Schedule the box to be dropped when its enclosing scope exits.
2258 find_scope_cx(cx).cleanups +=
2259 clean(bind drop_ty(_, sub.val, box_ty));
2262 auto rc = sub.bcx.build.GEP(box,
2264 C_int(abi.box_rc_field_refcnt)));
2265 auto body = sub.bcx.build.GEP(box,
2267 C_int(abi.box_rc_field_body)));
2268 sub.bcx.build.Store(C_int(1), rc);
2270 // Cast the body type to the type of the value. This is needed to
2271 // make tags work, since tags have a different LLVM type depending
2272 // on whether they're boxed or not.
2273 if (!ty.type_has_dynamic_size(e_ty)) {
2274 auto llety = T_ptr(type_of(sub.bcx.fcx.ccx, e_ty));
2275 body = sub.bcx.build.PointerCast(body, llety);
2278 sub = copy_ty(sub.bcx, INIT, body, e_val, e_ty);
2279 ret res(sub.bcx, box);
// (deref arm:) point at the box body; load it only for scalar/nil,
// otherwise yield the interior pointer.
2282 auto val = sub.bcx.build.GEP(sub.val,
2284 C_int(abi.box_rc_field_body)));
2285 auto e_ty = node_ann_type(sub.bcx.fcx.ccx, a);
2286 if (ty.type_is_scalar(e_ty) ||
2287 ty.type_is_nil(e_ty)) {
2288 val = sub.bcx.build.Load(val);
2290 ret res(sub.bcx, val);
2292 case (ast._mutable) {
2293 ret trans_expr(cx, e);
// Translates a comparison `lhs op rhs` at type `t`. Scalars compare
// directly; structural types get a lexicographic element-pair walk (see
// the long comment in the body) driven by iter_structural_ty_full(), with
// the running verdict held in an alloca'd i1 flag. vec/str/box are not
// yet implemented. NOTE(review): excerpt is line-sampled; the final
// `ret res(...)` of the structural branch is elided.
2299 fn trans_compare(@block_ctxt cx, ast.binop op, @ty.t t,
2300 ValueRef lhs, ValueRef rhs) -> result {
2302 if (ty.type_is_scalar(t)) {
2303 ret res(cx, trans_scalar_compare(cx, op, t, lhs, rhs));
2305 } else if (ty.type_is_structural(t)) {
2306 auto scx = new_sub_block_ctxt(cx, "structural compare start");
2307 auto next = new_sub_block_ctxt(cx, "structural compare end");
2308 cx.build.Br(scx.llbb);
2311 * We're doing lexicographic comparison here. We start with the
2312 * assumption that the two input elements are equal. Depending on
2313 * operator, this means that the result is either true or false;
2314 * equality produces 'true' for ==, <= and >=. It produces 'false' for
2317 * We then move one element at a time through the structure checking
2318 * for pairwise element equality. If we have equality, our assumption
2319 * about overall sequence equality is not modified, so we have to move
2320 * to the next element.
2322 * If we do not have pairwise element equality, we have reached an
2323 * element that 'decides' the lexicographic comparison. So we exit the
2324 * loop with a flag that indicates the true/false sense of that
2325 * decision, by testing the element again with the operator we're
2328 * When we're lucky, LLVM should be able to fold some of these two
2329 * tests together (as they're applied to the same operands and in some
2330 * cases are sometimes redundant). But we don't bother trying to
2331 * optimize combinations like that, at this level.
// The flag holds the comparison verdict; seeded per operator below.
2334 auto flag = scx.build.Alloca(T_i1());
2337 // ==, <= and >= default to true if they find == all the way.
2338 case (ast.eq) { scx.build.Store(C_integral(1, T_i1()), flag); }
2339 case (ast.le) { scx.build.Store(C_integral(1, T_i1()), flag); }
2340 case (ast.ge) { scx.build.Store(C_integral(1, T_i1()), flag); }
2342 // ==, <= and >= default to false if they find == all the way.
2343 scx.build.Store(C_integral(0, T_i1()), flag);
// Per-element-pair callback: equal -> keep walking; unequal ->
// decide via `op`, store the verdict, and jump to the end block.
2347 fn inner(@block_ctxt last_cx,
2353 @ty.t t) -> result {
2355 auto cnt_cx = new_sub_block_ctxt(cx, "continue comparison");
2356 auto stop_cx = new_sub_block_ctxt(cx, "stop comparison");
2358 // First 'eq' comparison: if so, continue to next elts.
2359 auto eq_r = trans_compare(cx, ast.eq, t, av, bv);
2360 eq_r.bcx.build.CondBr(eq_r.val, cnt_cx.llbb, stop_cx.llbb);
2362 // Second 'op' comparison: find out how this elt-pair decides.
2363 auto stop_r = trans_compare(stop_cx, op, t, av, bv);
2364 stop_r.bcx.build.Store(stop_r.val, flag);
2365 stop_r.bcx.build.Br(last_cx.llbb);
2366 ret res(cnt_cx, C_nil());
2369 auto r = iter_structural_ty_full(scx, lhs, rhs, t,
2370 bind inner(next, flag, op,
2373 r.bcx.build.Br(next.llbb);
2374 auto v = next.build.Load(flag);
2378 // FIXME: compare vec, str, box?
2379 cx.fcx.ccx.sess.unimpl("type in trans_compare");
2380 ret res(cx, C_bool(false));
// Dispatches a scalar comparison to the floating-point or integral
// comparison emitter depending on `t`.
2384 fn trans_scalar_compare(@block_ctxt cx, ast.binop op, @ty.t t,
2385 ValueRef lhs, ValueRef rhs) -> ValueRef {
2386 if (ty.type_is_fp(t)) {
2387 ret trans_fp_compare(cx, op, t, lhs, rhs);
2389 ret trans_integral_compare(cx, op, t, lhs, rhs);
// Emits an FCmp for a floating-point comparison, mapping each AST binop
// to the corresponding ordered LLVM real predicate.
2393 fn trans_fp_compare(@block_ctxt cx, ast.binop op, @ty.t fptype,
2394 ValueRef lhs, ValueRef rhs) -> ValueRef {
// Placeholder initial value; every comparison arm overwrites it.
2396 auto cmp = lib.llvm.LLVMIntEQ;
2398 // FIXME: possibly use the unordered-or-< predicates here,
2399 // for now we're only going with ordered-and-< style (no NaNs).
2400 case (ast.eq) { cmp = lib.llvm.LLVMRealOEQ; }
2401 case (ast.ne) { cmp = lib.llvm.LLVMRealONE; }
2402 case (ast.lt) { cmp = lib.llvm.LLVMRealOLT; }
2403 case (ast.gt) { cmp = lib.llvm.LLVMRealOGT; }
2404 case (ast.le) { cmp = lib.llvm.LLVMRealOLE; }
2405 case (ast.ge) { cmp = lib.llvm.LLVMRealOGE; }
2408 ret cx.build.FCmp(cmp, lhs, rhs);
// Emits an ICmp for an integral comparison, selecting signed vs unsigned
// LLVM predicates for the ordering operators based on `intype`.
2411 fn trans_integral_compare(@block_ctxt cx, ast.binop op, @ty.t intype,
2412 ValueRef lhs, ValueRef rhs) -> ValueRef {
2413 auto cmp = lib.llvm.LLVMIntEQ;
2415 case (ast.eq) { cmp = lib.llvm.LLVMIntEQ; }
2416 case (ast.ne) { cmp = lib.llvm.LLVMIntNE; }
// (lt arm)
2419 if (ty.type_is_signed(intype)) {
2420 cmp = lib.llvm.LLVMIntSLT;
2422 cmp = lib.llvm.LLVMIntULT;
// (le arm)
2426 if (ty.type_is_signed(intype)) {
2427 cmp = lib.llvm.LLVMIntSLE;
2429 cmp = lib.llvm.LLVMIntULE;
// (gt arm)
2433 if (ty.type_is_signed(intype)) {
2434 cmp = lib.llvm.LLVMIntSGT;
2436 cmp = lib.llvm.LLVMIntUGT;
// (ge arm)
2440 if (ty.type_is_signed(intype)) {
2441 cmp = lib.llvm.LLVMIntSGE;
2443 cmp = lib.llvm.LLVMIntUGE;
2447 ret cx.build.ICmp(cmp, lhs, rhs);
// Appends sequence `rhs` onto the sequence pointed to by `lhs` (in place)
// by fastcalling the crate's vec_append glue with the vec and element
// tydescs. For str the glue is told to skip the trailing NUL byte.
// NOTE(review): excerpt is line-sampled; the alt on t and some call
// arguments are elided. The PointerCast asymmetry at 2469/2470 (dst cast
// to T_ptr(T_opaque_vec_ptr()) but src to T_opaque_vec_ptr()) looks
// intentional — dst is an out-param cell — but worth confirming against
// the glue's signature.
2450 fn trans_vec_append(@block_ctxt cx, @ty.t t,
2451 ValueRef lhs, ValueRef rhs) -> result {
2453 auto elt_ty = ty.sequence_element_type(t);
2455 auto skip_null = C_bool(false);
2457 case (ty.ty_str) { skip_null = C_bool(true); }
2463 auto llvec_tydesc = get_tydesc(bcx, t);
2464 bcx = llvec_tydesc.bcx;
2466 auto llelt_tydesc = get_tydesc(bcx, elt_ty);
2467 bcx = llelt_tydesc.bcx;
2469 auto dst = bcx.build.PointerCast(lhs, T_ptr(T_opaque_vec_ptr()));
2470 auto src = bcx.build.PointerCast(rhs, T_opaque_vec_ptr());
2472 ret res(bcx, bcx.build.FastCall(cx.fcx.ccx.glues.vec_append_glue,
2473 vec(cx.fcx.lltaskptr,
2476 dst, src, skip_null)));
// Implements `lhs + rhs` for sequences: allocate a temporary, copy lhs
// in, append rhs via trans_vec_append(), and register a scope cleanup to
// drop the temporary. NOTE(review): the temporary binding (`tmp`) and
// the final `ret` are elided in this excerpt.
2479 fn trans_vec_add(@block_ctxt cx, @ty.t t,
2480 ValueRef lhs, ValueRef rhs) -> result {
2481 auto r = alloc_ty(cx, t);
2483 r = copy_ty(r.bcx, INIT, tmp, lhs, t);
2484 auto bcx = trans_vec_append(r.bcx, t, tmp, rhs).bcx;
2485 tmp = load_scalar_or_boxed(bcx, tmp, t);
2486 find_scope_cx(cx).cleanups += clean(bind drop_ty(_, tmp, t));
// Translates a non-lazy binary operator: arithmetic ops map to the
// corresponding LLVM instructions (with signed/unsigned division and
// remainder chosen from `intype`), `+` on sequences routes to
// trans_vec_add(), and any remaining operator is a comparison.
// NOTE(review): the enclosing alt and some arm headers are elided in
// this excerpt.
2491 fn trans_eager_binop(@block_ctxt cx, ast.binop op, @ty.t intype,
2492 ValueRef lhs, ValueRef rhs) -> result {
// (add arm:) sequence + sequence is concatenation, not arithmetic.
2496 if (ty.type_is_sequence(intype)) {
2497 ret trans_vec_add(cx, intype, lhs, rhs);
2499 ret res(cx, cx.build.Add(lhs, rhs));
2501 case (ast.sub) { ret res(cx, cx.build.Sub(lhs, rhs)); }
2503 case (ast.mul) { ret res(cx, cx.build.Mul(lhs, rhs)); }
// (div arm)
2505 if (ty.type_is_signed(intype)) {
2506 ret res(cx, cx.build.SDiv(lhs, rhs));
2508 ret res(cx, cx.build.UDiv(lhs, rhs));
// (rem arm)
2512 if (ty.type_is_signed(intype)) {
2513 ret res(cx, cx.build.SRem(lhs, rhs));
2515 ret res(cx, cx.build.URem(lhs, rhs));
2519 case (ast.bitor) { ret res(cx, cx.build.Or(lhs, rhs)); }
2520 case (ast.bitand) { ret res(cx, cx.build.And(lhs, rhs)); }
2521 case (ast.bitxor) { ret res(cx, cx.build.Xor(lhs, rhs)); }
2522 case (ast.lsl) { ret res(cx, cx.build.Shl(lhs, rhs)); }
2523 case (ast.lsr) { ret res(cx, cx.build.LShr(lhs, rhs)); }
2524 case (ast.asr) { ret res(cx, cx.build.AShr(lhs, rhs)); }
// Everything else (eq/ne/lt/le/gt/ge) is a comparison.
2526 ret trans_compare(cx, op, intype, lhs, rhs);
// Automatically dereferences through box types: while the current type is
// ty_box, GEP to the body and load, yielding the innermost value.
// NOTE(review): the surrounding loop and the non-box exit arm are elided
// in this excerpt.
2532 fn autoderef(@block_ctxt cx, ValueRef v, @ty.t t) -> result {
2533 let ValueRef v1 = v;
2538 case (ty.ty_box(?inner)) {
2539 auto body = cx.build.GEP(v1,
2541 C_int(abi.box_rc_field_body)));
2543 v1 = load_scalar_or_boxed(cx, body, inner);
// Type-level counterpart of autoderef(): strips ty_box layers from `t`.
// NOTE(review): body is almost entirely elided in this excerpt.
2552 fn autoderefed_ty(@ty.t t) -> @ty.t {
2557 case (ty.ty_box(?inner)) {
// Translates a binary expression. `&&` and `||` are short-circuiting:
// the rhs is evaluated in its own scope block and the two possible
// outcomes are merged with join_results(). All other operators evaluate
// both sides eagerly (with autoderef) and defer to trans_eager_binop().
// NOTE(review): excerpt is line-sampled; the alt on `op` and the CondBr
// target lists are partially elided.
2567 fn trans_binary(@block_ctxt cx, ast.binop op,
2568 @ast.expr a, @ast.expr b) -> result {
2570 // First couple cases are lazy:
// (and arm:) if lhs is false, skip rhs and yield false.
2575 auto lhs_res = trans_expr(cx, a);
2576 lhs_res = autoderef(lhs_res.bcx, lhs_res.val, ty.expr_ty(a));
2578 auto rhs_cx = new_scope_block_ctxt(cx, "rhs");
2579 auto rhs_res = trans_expr(rhs_cx, b);
2580 rhs_res = autoderef(rhs_res.bcx, rhs_res.val, ty.expr_ty(b));
2582 auto lhs_false_cx = new_scope_block_ctxt(cx, "lhs false");
2583 auto lhs_false_res = res(lhs_false_cx, C_bool(false));
2585 lhs_res.bcx.build.CondBr(lhs_res.val,
2589 ret join_results(cx, T_bool(),
2590 vec(lhs_false_res, rhs_res));
// (or arm:) if lhs is true, skip rhs and yield true.
2595 auto lhs_res = trans_expr(cx, a);
2596 lhs_res = autoderef(lhs_res.bcx, lhs_res.val, ty.expr_ty(a));
2598 auto rhs_cx = new_scope_block_ctxt(cx, "rhs");
2599 auto rhs_res = trans_expr(rhs_cx, b);
2600 rhs_res = autoderef(rhs_res.bcx, rhs_res.val, ty.expr_ty(b));
2602 auto lhs_true_cx = new_scope_block_ctxt(cx, "lhs true");
2603 auto lhs_true_res = res(lhs_true_cx, C_bool(true));
2605 lhs_res.bcx.build.CondBr(lhs_res.val,
2609 ret join_results(cx, T_bool(),
2610 vec(lhs_true_res, rhs_res));
2614 // Remaining cases are eager:
2615 auto lhs = trans_expr(cx, a);
2616 auto lhty = ty.expr_ty(a);
2617 lhs = autoderef(lhs.bcx, lhs.val, lhty);
2618 auto rhs = trans_expr(lhs.bcx, b);
2619 auto rhty = ty.expr_ty(b);
2620 rhs = autoderef(rhs.bcx, rhs.val, rhty);
2621 ret trans_eager_binop(rhs.bcx, op,
2622 autoderefed_ty(lhty),
// Merges several result branches into one: dead (terminated) branches are
// filtered out; zero live branches propagate an arbitrary dead result;
// one live branch is returned as-is; multiple live branches are branched
// into a fresh join block whose value is a phi of type `t`.
// NOTE(review): excerpt is line-sampled; the `t`/`ins` parameters and the
// 0/1-arity arm bodies are elided.
2629 fn join_results(@block_ctxt parent_cx,
2634 let vec[result] live = vec();
2635 let vec[ValueRef] vals = vec();
2636 let vec[BasicBlockRef] bbs = vec();
// Keep only branches whose block hasn't already been terminated.
2638 for (result r in ins) {
2639 if (! is_terminated(r.bcx)) {
2646 alt (_vec.len[result](live)) {
2648 // No incoming edges are live, so we're in dead-code-land.
2649 // Arbitrarily pick the first dead edge, since the caller
2650 // is just going to propagate it outward.
2651 check (_vec.len[result](ins) >= 1u);
2656 // Only one incoming edge is live, so we just feed that block
2661 case (_) { /* fall through */ }
2664 // We have >1 incoming edges. Make a join block and br+phi them into it.
2665 auto join_cx = new_sub_block_ctxt(parent_cx, "join");
2666 for (result r in live) {
2667 r.bcx.build.Br(join_cx.llbb);
2669 auto phi = join_cx.build.Phi(t, vals, bbs);
2670 ret res(join_cx, phi);
// Translates an if/else expression: evaluates the condition, builds
// scoped then/else blocks, conditionally branches, and merges the two
// arms' results with join_results(). A missing else arm yields nil.
// NOTE(review): excerpt is line-sampled; the CondBr target list is
// elided.
2673 fn trans_if(@block_ctxt cx, @ast.expr cond,
2674 &ast.block thn, &option.t[@ast.expr] els) -> result {
2676 auto cond_res = trans_expr(cx, cond);
2678 auto then_cx = new_scope_block_ctxt(cx, "then");
2679 auto then_res = trans_block(then_cx, thn);
2681 auto else_cx = new_scope_block_ctxt(cx, "else");
2682 auto else_res = res(else_cx, C_nil());
2685 case (some[@ast.expr](?elexpr)) {
2686 // FIXME: Shouldn't need to unwrap the block here,
2687 // instead just use 'else_res = trans_expr(else_cx, elexpr)',
2688 // but either a) trans_expr doesn't handle expr_block
2689 // correctly or b) I have no idea what I'm doing...
2691 case (ast.expr_if(_, _, _, _)) {
2692 else_res = trans_expr(else_cx, elexpr)
2694 case (ast.expr_block(?b, _)) {
2695 else_res = trans_block(else_cx, b);
2699 case (_) { /* fall through */ }
2702 cond_res.bcx.build.CondBr(cond_res.val,
2706 // FIXME: use inferred type when available.
2707 ret join_results(cx, T_nil(),
2708 vec(then_res, else_res));
// Translate a `for` loop over a sequence. The per-element work is factored
// into the nested fn `inner`, which is partially applied with `bind` and
// handed to iter_sequence as the loop-body callback.
2711 fn trans_for(@block_ctxt cx,
2714 &ast.block body) -> result {
// One iteration: open a fresh scope, allocate the loop variable, copy the
// current element into it (registering a cleanup to drop it on scope
// exit), run the loop body, then branch to the "next" continuation block.
2716 fn inner(@block_ctxt cx,
2717 @ast.local local, ValueRef curr,
2718 @ty.t t, ast.block body) -> result {
2720 auto scope_cx = new_scope_block_ctxt(cx, "for loop scope");
2721 auto next_cx = new_sub_block_ctxt(cx, "next");
2723 cx.build.Br(scope_cx.llbb);
2724 auto local_res = alloc_local(scope_cx, local);
2725 auto bcx = copy_ty(local_res.bcx, INIT, local_res.val, curr, t).bcx;
2726 scope_cx.cleanups += clean(bind drop_slot(_, local_res.val, t));
2727 bcx = trans_block(bcx, body).bcx;
2728 bcx.build.Br(next_cx.llbb);
2729 ret res(next_cx, C_nil());
// Pull the loop variable's declaration out of the `for` head, then
// evaluate the sequence expression and iterate it.
2733 let @ast.local local;
2735 case (ast.decl_local(?loc)) {
2740 auto seq_ty = ty.expr_ty(seq);
2741 auto seq_res = trans_expr(cx, seq);
2742 ret iter_sequence(seq_res.bcx, seq_res.val, seq_ty,
2743 bind inner(_, local, _, _, body));
// Translate a `for each` loop: lower the loop body into a separate
// "foreach body" function, bundle it with an environment pointer into a
// normal fn pair, and pass that pair as the implicit iter-body argument of
// the iterator call.
2746 fn trans_for_each(@block_ctxt cx,
2749 &ast.block body) -> result {
2752 * The translation is a little .. complex here. Code like:
2758 * foreach (ty v in foo(a,b)) { body(p,q,v) }
2761 * Turns into something like so (C/Rust mishmash):
2763 * type env = { *ty1 p, *ty2 q, ... };
2765 * let env e = { &p, &q, ... };
2767 * fn foreach123_body(env* e, ty v) { body(*(e->p),*(e->q),v) }
2769 * foo([foreach123_body, env*], a, b);
2773 // Step 1: walk body and figure out which references it makes
2774 // escape. This could be determined upstream, and probably ought
2775 // to be so, eventually. For first cut, skip this. Null env.
2777 auto env_ty = T_opaque_closure_ptr(cx.fcx.ccx.tn);
2780 // Step 2: Declare foreach body function.
2782 // FIXME: possibly support alias-mode here?
// Type of the loop variable; defaults to nil, overwritten from the decl.
2783 auto decl_ty = plain_ty(ty.ty_nil);
2785 case (ast.decl_local(?local)) {
2786 decl_ty = node_ann_type(cx.fcx.ccx, local.ann);
// Generate a fresh, path-qualified symbol name for the body function.
2791 cx.fcx.ccx.names.next("_rust_foreach")
2792 + sep() + cx.fcx.ccx.path;
2794 // The 'env' arg entering the body function is a fake env member (as in
2795 // the env-part of the normal rust calling convention) that actually
2796 // points to a stack allocated env in this frame. We bundle that env
2797 // pointer along with the foreach-body-fn pointer into a 'normal' fn pair
2798 // and pass it in as a first class fn-arg to the iterator.
2800 auto iter_body_llty = type_of_fn_full(cx.fcx.ccx, ast.proto_fn,
2802 vec(rec(mode=ast.val, ty=decl_ty)),
2803 plain_ty(ty.ty_nil));
2805 let ValueRef lliterbody = decl_fastcall_fn(cx.fcx.ccx.llmod,
2808 // FIXME: handle ty params properly.
2809 let vec[ast.ty_param] ty_params = vec();
// Build the body function's contents: a fresh fn context and entry block,
// the translated loop body, then a void return.
2811 auto fcx = new_fn_ctxt(cx.fcx.ccx, lliterbody);
2812 auto bcx = new_top_block_ctxt(fcx);
2814 // FIXME: populate lllocals from llenv here.
2815 auto res = trans_block(bcx, body);
2816 res.bcx.build.RetVoid();
2819 // Step 3: Call iter passing [lliterbody, llenv], plus other args.
2823 case (ast.expr_call(?f, ?args, ?ann)) {
// Stack-allocate a fn pair and store the body-fn pointer in its code
// slot; the loaded pair is passed to trans_call as the lliterbody arg.
2825 auto pair = cx.build.Alloca(T_fn_pair(cx.fcx.ccx.tn,
2827 auto code_cell = cx.build.GEP(pair,
2829 C_int(abi.fn_field_code)));
2830 cx.build.Store(lliterbody, code_cell);
2832 // log "lliterbody: " + val_str(cx.fcx.ccx.tn, lliterbody);
2833 ret trans_call(cx, f,
2834 some[ValueRef](cx.build.Load(pair)),
// Translate a `while` loop: a condition block, a body block that loops
// back to the condition, and a "next" continuation block. Control enters
// via the condition, so a false condition on entry skips the body.
2843 fn trans_while(@block_ctxt cx, @ast.expr cond,
2844 &ast.block body) -> result {
2846 auto cond_cx = new_scope_block_ctxt(cx, "while cond");
2847 auto body_cx = new_scope_block_ctxt(cx, "while loop body");
2848 auto next_cx = new_sub_block_ctxt(cx, "next");
2850 auto body_res = trans_block(body_cx, body);
2851 auto cond_res = trans_expr(cond_cx, cond);
// Back-edge: after the body, re-test the condition.
2853 body_res.bcx.build.Br(cond_cx.llbb);
2854 cond_res.bcx.build.CondBr(cond_res.val,
// Entry edge from the enclosing block into the condition test.
2858 cx.build.Br(cond_cx.llbb);
2859 ret res(next_cx, C_nil());
// Translate a `do`/`while` loop. Unlike trans_while, control enters the
// body first (entry Br targets body_cx), and the condition is evaluated in
// the block the body ends in, so the body always runs at least once.
2862 fn trans_do_while(@block_ctxt cx, &ast.block body,
2863 @ast.expr cond) -> result {
2865 auto body_cx = new_scope_block_ctxt(cx, "do-while loop body");
2866 auto next_cx = new_sub_block_ctxt(cx, "next");
2868 auto body_res = trans_block(body_cx, body);
2869 auto cond_res = trans_expr(body_res.bcx, cond);
2871 cond_res.bcx.build.CondBr(cond_res.val,
2874 cx.build.Br(body_cx.llbb);
// The loop expression's value is the last body value.
2875 ret res(next_cx, body_res.val);
2878 // Pattern matching translation
2880 // Returns a pointer to the union part of the LLVM representation of a tag
2881 // type, cast to the appropriate type.
// The union type is synthesized on the fly as a struct of the subpatterns'
// LLVM types; llval points at the whole tag value, whose field 1 is the
// variant-data blob (field 0 is the discriminant).
2882 fn get_pat_union_ptr(@block_ctxt cx, vec[@ast.pat] subpats, ValueRef llval)
2884 auto llblobptr = cx.build.GEP(llval, vec(C_int(0), C_int(1)));
2886 // Generate the union type.
2887 let vec[TypeRef] llsubpattys = vec();
2888 for (@ast.pat subpat in subpats) {
2889 llsubpattys += vec(type_of(cx.fcx.ccx, pat_ty(subpat)));
2892 // Recursively check subpatterns.
2893 auto llunionty = T_struct(llsubpattys);
2894 ret cx.build.TruncOrBitCast(llblobptr, T_ptr(llunionty));
// Emit the *test* half of pattern matching: code that checks whether the
// value llval matches `pat`, branching to next_cx on failure. Bindings are
// not created here (see trans_pat_binding); wildcard and bind patterns
// trivially match.
2897 fn trans_pat_match(@block_ctxt cx, @ast.pat pat, ValueRef llval,
2898 @block_ctxt next_cx) -> result {
2900 case (ast.pat_wild(_)) { ret res(cx, llval); }
2901 case (ast.pat_bind(_, _, _)) { ret res(cx, llval); }
// Literal pattern: compare the scrutinee against the translated literal
// and branch on the comparison result.
2903 case (ast.pat_lit(?lt, ?ann)) {
2904 auto lllit = trans_lit(cx.fcx.ccx, *lt, ann);
2905 auto lltype = ty.ann_to_type(ann);
2906 auto lleq = trans_compare(cx, ast.eq, lltype, llval, lllit);
2908 auto matched_cx = new_sub_block_ctxt(lleq.bcx, "matched_cx");
2909 lleq.bcx.build.CondBr(lleq.val, matched_cx.llbb, next_cx.llbb);
2910 ret res(matched_cx, llval);
// Tag (variant) pattern: load the discriminant (field 0 of the tag
// value), find the matched variant's index by scanning the tag's
// variant list, compare, then recursively test any subpatterns against
// the variant's payload.
2913 case (ast.pat_tag(?id, ?subpats, ?vdef_opt, ?ann)) {
2914 auto lltagptr = cx.build.GEP(llval, vec(C_int(0), C_int(0)));
2915 auto lltag = cx.build.Load(lltagptr);
2917 auto vdef = option.get[ast.variant_def](vdef_opt);
2918 auto variant_id = vdef._1;
2919 auto variant_tag = 0;
2921 auto variants = tag_variants(cx.fcx.ccx, vdef._0);
2923 for (ast.variant v in variants) {
2924 auto this_variant_id = v.id;
2925 if (variant_id._0 == this_variant_id._0 &&
2926 variant_id._1 == this_variant_id._1) {
2932 auto matched_cx = new_sub_block_ctxt(cx, "matched_cx");
2934 auto lleq = cx.build.ICmp(lib.llvm.LLVMIntEQ, lltag,
2935 C_int(variant_tag));
2936 cx.build.CondBr(lleq, matched_cx.llbb, next_cx.llbb);
2938 if (_vec.len[@ast.pat](subpats) > 0u) {
2939 auto llunionptr = get_pat_union_ptr(matched_cx, subpats,
// Each subpattern test threads the block context forward, so later
// subpatterns are tested in the block where earlier ones succeeded.
2942 for (@ast.pat subpat in subpats) {
2943 auto llsubvalptr = matched_cx.build.GEP(llunionptr,
2946 auto llsubval = load_scalar_or_boxed(matched_cx,
2949 auto subpat_res = trans_pat_match(matched_cx, subpat,
2951 matched_cx = subpat_res.bcx;
2955 ret res(matched_cx, llval);
// Emit the *binding* half of pattern matching: for each pat_bind, allocate
// a stack slot, register it in lllocals, schedule a drop cleanup, and copy
// the matched value in. Runs after trans_pat_match has already confirmed
// the match, so no tests are emitted here.
2962 fn trans_pat_binding(@block_ctxt cx, @ast.pat pat, ValueRef llval)
2965 case (ast.pat_wild(_)) { ret res(cx, llval); }
2966 case (ast.pat_lit(_, _)) { ret res(cx, llval); }
2967 case (ast.pat_bind(?id, ?def_id, ?ann)) {
2968 auto ty = node_ann_type(cx.fcx.ccx, ann);
2969 auto llty = type_of(cx.fcx.ccx, ty);
2971 auto dst = cx.build.Alloca(llty);
// Name the alloca after the source identifier for readable IR.
2972 llvm.LLVMSetValueName(dst, _str.buf(id));
2973 cx.fcx.lllocals.insert(def_id, dst);
2974 cx.cleanups += clean(bind drop_slot(_, dst, ty));
2976 ret copy_ty(cx, INIT, dst, llval, ty);
// Tag pattern: recurse into the variant payload, binding each
// subpattern; the block context is threaded through the subpatterns.
2978 case (ast.pat_tag(_, ?subpats, _, _)) {
2979 if (_vec.len[@ast.pat](subpats) == 0u) { ret res(cx, llval); }
2981 auto llunionptr = get_pat_union_ptr(cx, subpats, llval);
2985 for (@ast.pat subpat in subpats) {
2986 auto llsubvalptr = this_cx.build.GEP(llunionptr,
2987 vec(C_int(0), C_int(i)));
2988 auto llsubval = load_scalar_or_boxed(this_cx, llsubvalptr,
2990 auto subpat_res = trans_pat_binding(this_cx, subpat,
2992 this_cx = subpat_res.bcx;
2996 ret res(this_cx, llval);
// Translate an `alt` (match) expression: evaluate the scrutinee once, then
// chain the arms -- each arm's pattern test falls through to the next arm's
// block on failure, and a successful match runs bindings then the arm body,
// finally branching to a shared "last" block.
3001 fn trans_alt(@block_ctxt cx, @ast.expr expr, vec[ast.arm] arms)
3003 auto expr_res = trans_expr(cx, expr);
3005 auto last_cx = new_sub_block_ctxt(expr_res.bcx, "last");
3007 auto this_cx = expr_res.bcx;
3008 for (ast.arm arm in arms) {
// next_cx is the failure target: where control goes if this arm's
// pattern does not match.
3009 auto next_cx = new_sub_block_ctxt(expr_res.bcx, "next");
3010 auto match_res = trans_pat_match(this_cx, arm.pat, expr_res.val,
3013 auto binding_cx = new_scope_block_ctxt(match_res.bcx, "binding");
3014 match_res.bcx.build.Br(binding_cx.llbb);
3016 auto binding_res = trans_pat_binding(binding_cx, arm.pat,
3019 auto block_res = trans_block(binding_res.bcx, arm.block);
3020 if (!is_terminated(block_res.bcx)) {
3021 block_res.bcx.build.Br(last_cx.llbb);
3027 // FIXME: This is executed when none of the patterns match; it should fail
3029 this_cx.build.Br(last_cx.llbb);
3031 // FIXME: This is very wrong; we should phi together all the arm blocks,
3032 // since this is an expression.
3033 ret res(last_cx, C_nil());
// Information carried along when referencing a generic item: the item's
// (polymorphic) type and the concrete tydesc values for its type params.
3036 type generic_info = rec(@ty.t item_type,
3037 vec[ValueRef] tydescs);
// Result of translating an lval: the value (or address) plus optional
// generic-instantiation info and, for method access, the object value the
// vtbl entry was loaded from.
3039 type lval_result = rec(result res,
3041 option.t[generic_info] generic,
3042 option.t[ValueRef] llobj);
// Build an lval_result for a memory-resident value (an address that needs
// a load before use). NOTE(review): the line setting the is_mem flag is
// elided in this extraction -- presumably is_mem=true here; confirm in the
// full source.
3044 fn lval_mem(@block_ctxt cx, ValueRef val) -> lval_result {
3045 ret rec(res=res(cx, val),
3047 generic=none[generic_info],
3048 llobj=none[ValueRef]);
// Build an lval_result for an immediate (non-memory) value. NOTE(review):
// visible lines are identical to lval_mem; the distinguishing is_mem field
// assignment is elided in this extraction -- presumably is_mem=false here.
3051 fn lval_val(@block_ctxt cx, ValueRef val) -> lval_result {
3052 ret rec(res=res(cx, val),
3054 generic=none[generic_info],
3055 llobj=none[ValueRef]);
// Reference a (possibly generic) fn item as an lval. Looks up the item's
// fn pair, and if the use site instantiates type parameters, computes the
// tydescs for the concrete types and attaches them as generic_info.
3058 fn lval_generic_fn(@block_ctxt cx,
3059 ty.ty_params_and_ty tpt,
3064 check (cx.fcx.ccx.fn_pairs.contains_key(fn_id));
3065 auto lv = lval_val(cx, cx.fcx.ccx.fn_pairs.get(fn_id));
// Resolve the item's type params against the monomorphic type recorded
// at this use site by the typechecker.
3066 auto monoty = node_ann_type(cx.fcx.ccx, ann);
3067 auto tys = ty.resolve_ty_params(tpt, monoty);
3069 if (_vec.len[@ty.t](tys) != 0u) {
3071 let vec[ValueRef] tydescs = vec();
3072 for (@ty.t t in tys) {
3073 auto td = get_tydesc(bcx, t);
3075 append[ValueRef](tydescs, td.val);
3077 auto gen = rec( item_type = tpt._1,
3078 tydescs = tydescs );
3079 lv = rec(res = res(bcx, lv.res.val),
3080 generic = some[generic_info](gen)
// Translate a path expression to an lval by dispatching on the resolved
// definition: args/locals/bindings/obj-fields come from the fn context's
// slot tables; fns/objs/native fns go through lval_generic_fn; variants
// either produce a constructor fn or materialize a nullary tag value.
3086 fn trans_path(@block_ctxt cx, &ast.path p, &option.t[ast.def] dopt,
3087 &ast.ann ann) -> lval_result {
3089 case (some[ast.def](?def)) {
3091 case (ast.def_arg(?did)) {
3092 check (cx.fcx.llargs.contains_key(did));
3093 ret lval_mem(cx, cx.fcx.llargs.get(did));
3095 case (ast.def_local(?did)) {
3096 check (cx.fcx.lllocals.contains_key(did));
3097 ret lval_mem(cx, cx.fcx.lllocals.get(did));
3099 case (ast.def_binding(?did)) {
// Pattern bindings live in the same slot table as locals.
3100 check (cx.fcx.lllocals.contains_key(did));
3101 ret lval_mem(cx, cx.fcx.lllocals.get(did));
3103 case (ast.def_obj_field(?did)) {
3104 check (cx.fcx.llobjfields.contains_key(did));
3105 ret lval_mem(cx, cx.fcx.llobjfields.get(did));
3107 case (ast.def_fn(?did)) {
3108 check (cx.fcx.ccx.items.contains_key(did));
3109 auto fn_item = cx.fcx.ccx.items.get(did);
3110 ret lval_generic_fn(cx, ty.item_ty(fn_item), did, ann);
3112 case (ast.def_obj(?did)) {
3113 check (cx.fcx.ccx.items.contains_key(did));
3114 auto fn_item = cx.fcx.ccx.items.get(did);
3115 ret lval_generic_fn(cx, ty.item_ty(fn_item), did, ann);
3117 case (ast.def_variant(?tid, ?vid)) {
// A variant with a registered fn pair is a data-carrying
// constructor: reference it as a generic fn whose type is
// recovered from the matching variant in the tag item.
3118 if (cx.fcx.ccx.fn_pairs.contains_key(vid)) {
3119 check (cx.fcx.ccx.items.contains_key(tid));
3120 auto tag_item = cx.fcx.ccx.items.get(tid);
3121 auto params = ty.item_ty(tag_item)._0;
3122 auto fty = plain_ty(ty.ty_nil);
3123 alt (tag_item.node) {
3124 case (ast.item_tag(_, ?variants, _, _)) {
3125 for (ast.variant v in variants) {
3127 fty = node_ann_type(cx.fcx.ccx,
3133 ret lval_generic_fn(cx, tup(params, fty), vid, ann);
// Otherwise it is a nullary variant: allocate a tag value
// and store the variant's discriminant into field 0.
3136 auto tag_ty = node_ann_type(cx.fcx.ccx, ann);
3137 auto lldiscrim_gv = cx.fcx.ccx.discrims.get(vid);
3138 auto lldiscrim = cx.build.Load(lldiscrim_gv);
3140 auto alloc_result = alloc_ty(cx, tag_ty);
3141 auto lltagblob = alloc_result.val;
3142 auto lltagptr = alloc_result.bcx.build.PointerCast(
3143 lltagblob, T_ptr(type_of(cx.fcx.ccx, tag_ty)));
3145 auto lldiscrimptr = alloc_result.bcx.build.GEP(
3146 lltagptr, vec(C_int(0), C_int(0)));
3147 alloc_result.bcx.build.Store(lldiscrim, lldiscrimptr);
3149 ret lval_val(alloc_result.bcx, lltagptr);
3152 case (ast.def_const(?did)) {
3153 check (cx.fcx.ccx.consts.contains_key(did));
3154 ret lval_mem(cx, cx.fcx.ccx.consts.get(did));
3156 case (ast.def_native_fn(?did)) {
3157 check (cx.fcx.ccx.native_items.contains_key(did));
3158 auto fn_item = cx.fcx.ccx.native_items.get(did);
3159 ret lval_generic_fn(cx, ty.native_item_ty(fn_item),
3163 cx.fcx.ccx.sess.unimpl("def variant in trans");
3167 case (none[ast.def]) {
3168 cx.fcx.ccx.sess.err("unresolved expr_path in trans");
// Translate a field-access lval (`base.field`). Autoderefs the base, then
// dispatches on its type: tuples and records GEP to the field slot;
// objects index into the vtbl and return the method pointer with llobj set
// so the caller can pass the object as the method's self argument.
3174 fn trans_field(@block_ctxt cx, &ast.span sp, @ast.expr base,
3175 &ast.ident field, &ast.ann ann) -> lval_result {
3176 auto r = trans_expr(cx, base);
3177 auto t = ty.expr_ty(base);
3178 r = autoderef(r.bcx, r.val, t);
3179 t = autoderefed_ty(t);
3181 case (ty.ty_tup(?fields)) {
3182 let uint ix = ty.field_num(cx.fcx.ccx.sess, sp, field);
3183 auto v = GEP_tup_like(r.bcx, t, r.val, vec(0, ix as int));
3184 ret lval_mem(v.bcx, v.val);
3186 case (ty.ty_rec(?fields)) {
3187 let uint ix = ty.field_idx(cx.fcx.ccx.sess, sp, field, fields);
3188 auto v = GEP_tup_like(r.bcx, t, r.val, vec(0, ix as int));
3189 ret lval_mem(v.bcx, v.val);
3191 case (ty.ty_obj(?methods)) {
3192 let uint ix = ty.method_idx(cx.fcx.ccx.sess, sp, field, methods);
// Load the vtbl pointer out of the object, then GEP to the
// method's entry within it.
3193 auto vtbl = r.bcx.build.GEP(r.val,
3195 C_int(abi.obj_field_vtbl)));
3196 vtbl = r.bcx.build.Load(vtbl);
3197 auto v = r.bcx.build.GEP(vtbl, vec(C_int(0),
// Record the object value so the method call can bind self.
3200 auto lvo = lval_mem(r.bcx, v);
3201 ret rec(llobj = some[ValueRef](r.val) with lvo);
3203 case (_) { cx.fcx.ccx.sess.unimpl("field variant in trans_field"); }
// Translate a vector-index lval (`base.(idx)`): autoderef the base,
// normalize the index to the native int width, emit a dynamic bounds check
// against the vec's fill field (branching to a fail block on violation),
// then GEP to the element.
3208 fn trans_index(@block_ctxt cx, &ast.span sp, @ast.expr base,
3209 @ast.expr idx, &ast.ann ann) -> lval_result {
3211 auto lv = trans_expr(cx, base);
3212 lv = autoderef(lv.bcx, lv.val, ty.expr_ty(base));
3213 auto ix = trans_expr(lv.bcx, idx);
3217 // Cast to an LLVM integer. Rust is less strict than LLVM in this regard.
3219 auto ix_size = llsize_of_real(cx.fcx.ccx, val_ty(ix.val));
3220 auto int_size = llsize_of_real(cx.fcx.ccx, T_int());
3221 if (ix_size < int_size) {
3222 ix_val = bcx.build.ZExt(ix.val, T_int());
3223 } else if (ix_size > int_size) {
3224 ix_val = bcx.build.Trunc(ix.val, T_int());
3229 auto llunit_ty = node_type(cx.fcx.ccx, ann);
3230 auto unit_sz = size_of(bcx, node_ann_type(cx.fcx.ccx, ann));
// The bounds check compares the byte offset (index * element size)
// against the vec's fill (bytes in use), not the element count.
3233 auto scaled_ix = bcx.build.Mul(ix_val, unit_sz.val);
3235 auto lim = bcx.build.GEP(v, vec(C_int(0), C_int(abi.vec_elt_fill)));
3236 lim = bcx.build.Load(lim);
3238 auto bounds_check = bcx.build.ICmp(lib.llvm.LLVMIntULT,
3241 auto fail_cx = new_sub_block_ctxt(bcx, "fail");
3242 auto next_cx = new_sub_block_ctxt(bcx, "next");
3243 bcx.build.CondBr(bounds_check, next_cx.llbb, fail_cx.llbb);
3245 // fail: bad bounds check.
3246 auto fail_res = trans_fail(fail_cx, sp, "bounds check");
3247 fail_res.bcx.build.Br(next_cx.llbb);
3249 auto body = next_cx.build.GEP(v, vec(C_int(0), C_int(abi.vec_elt_data)));
3250 auto elt = next_cx.build.GEP(body, vec(C_int(0), ix_val));
3251 ret lval_mem(next_cx, elt);
3254 // The additional bool returned indicates whether it's mem (that is
3255 // represented as an alloca or heap, hence needs a 'load' to be used as an
// Translate any lval-shaped expression (path, field access, or index) by
// dispatching to the matching helper; all other expression forms are
// unimplemented here.
3258 fn trans_lval(@block_ctxt cx, @ast.expr e) -> lval_result {
3260 case (ast.expr_path(?p, ?dopt, ?ann)) {
3261 ret trans_path(cx, p, dopt, ann);
3263 case (ast.expr_field(?base, ?ident, ?ann)) {
3264 ret trans_field(cx, e.span, base, ident, ann);
3266 case (ast.expr_index(?base, ?idx, ?ann)) {
3267 ret trans_index(cx, e.span, base, idx, ann);
3269 case (_) { cx.fcx.ccx.sess.unimpl("expr variant in trans_lval"); }
// Translate a cast expression. Integer-to-integer casts pick sext/zext
// (by signedness of the destination) when widening and trunc when
// narrowing; floating-point casts are not yet implemented.
3274 fn trans_cast(@block_ctxt cx, @ast.expr e, &ast.ann ann) -> result {
3275 auto e_res = trans_expr(cx, e);
3276 auto llsrctype = val_ty(e_res.val);
3277 auto t = node_ann_type(cx.fcx.ccx, ann);
3278 auto lldsttype = type_of(cx.fcx.ccx, t);
3279 if (!ty.type_is_fp(t)) {
// Compare LLVM integer widths to decide widening vs narrowing.
3280 if (llvm.LLVMGetIntTypeWidth(lldsttype) >
3281 llvm.LLVMGetIntTypeWidth(llsrctype)) {
3282 if (ty.type_is_signed(t)) {
3283 // Widening signed cast.
3285 e_res.bcx.build.SExtOrBitCast(e_res.val,
3288 // Widening unsigned cast.
3290 e_res.bcx.build.ZExtOrBitCast(e_res.val,
3296 e_res.bcx.build.TruncOrBitCast(e_res.val,
3300 cx.fcx.ccx.sess.unimpl("fp cast");
// Emit the thunk function produced by a `bind` expression. The thunk has
// the signature of incoming_fty; when invoked it unpacks the closure
// (target fn pair, bound-arg bindings, captured tydescs), assembles the
// full outgoing argument list -- bound args loaded from the closure, free
// args forwarded from the thunk's own parameters -- and fastcalls the
// target function. Returns the LLVM function value of the new thunk.
//
// BUG FIX: `outgoing_arg_index` previously advanced by 0u, so it never
// moved past 0 and every call-time (none) argument was checked against
// outgoing_arg_tys.(0)'s dynamic-size-ness instead of its own. It must
// advance by 1u per formal argument.
3305 fn trans_bind_thunk(@crate_ctxt cx,
3308 vec[option.t[@ast.expr]] args,
3309 TypeRef llclosure_ty,
3310 vec[@ty.t] bound_tys,
3311 uint ty_param_count) -> ValueRef {
3312 // Construct a thunk-call with signature incoming_fty, and that copies
3313 // args forward into a call to outgoing_fty.
3315 let str s = cx.names.next("_rust_thunk") + sep() + cx.path;
3316 let TypeRef llthunk_ty = get_pair_fn_ty(type_of(cx, incoming_fty));
3317 let ValueRef llthunk = decl_fastcall_fn(cx.llmod, s, llthunk_ty);
3319 auto fcx = new_fn_ctxt(cx, llthunk);
3320 auto bcx = new_top_block_ctxt(fcx);
// The thunk's env parameter is really the closure box; cast it to the
// concrete closure type and dig out its body, target and bindings parts.
3322 auto llclosure = bcx.build.PointerCast(fcx.llenv, llclosure_ty);
3324 auto llbody = bcx.build.GEP(llclosure,
3326 C_int(abi.box_rc_field_body)));
3328 auto lltarget = bcx.build.GEP(llbody,
3330 C_int(abi.closure_elt_target)));
3332 auto llbound = bcx.build.GEP(llbody,
3334 C_int(abi.closure_elt_bindings)));
3336 auto lltargetclosure = bcx.build.GEP(lltarget,
3338 C_int(abi.fn_field_box)));
3339 lltargetclosure = bcx.build.Load(lltargetclosure);
3341 auto outgoing_ret_ty = ty.ty_fn_ret(outgoing_fty);
3342 auto outgoing_arg_tys = ty.ty_fn_args(outgoing_fty);
// Dynamically-sized returns are passed via an opaque typaram pointer.
3344 auto llretptr = fcx.llretptr;
3345 if (ty.type_has_dynamic_size(outgoing_ret_ty)) {
3346 llretptr = bcx.build.PointerCast(llretptr, T_typaram_ptr(cx.tn));
3349 let vec[ValueRef] llargs = vec(llretptr,
3353 // Copy in the type parameters.
3355 while (i < ty_param_count) {
3356 auto lltyparam_ptr =
3357 bcx.build.GEP(llbody, vec(C_int(0),
3358 C_int(abi.closure_elt_ty_params),
3360 llargs += vec(bcx.build.Load(lltyparam_ptr));
// NOTE(review): the comment below names three leading params (retptr,
// task ptr, env) but starts at 2u + i; confirm the intended LLVM param
// numbering against create_llargs_for_fn_args in the full source.
3364 let uint a = 2u + i; // retptr, task ptr, env come first
3366 let uint outgoing_arg_index = 0u;
3367 for (option.t[@ast.expr] arg in args) {
3370 // Arg provided at binding time; thunk copies it from closure.
3371 case (some[@ast.expr](_)) {
3372 let ValueRef bound_arg = bcx.build.GEP(llbound,
3375 // FIXME: possibly support passing aliases someday.
3376 llargs += bcx.build.Load(bound_arg);
3380 // Arg will be provided when the thunk is invoked.
3381 case (none[@ast.expr]) {
3382 let ValueRef passed_arg = llvm.LLVMGetParam(llthunk, a);
3383 if (ty.type_has_dynamic_size(outgoing_arg_tys.
3384 (outgoing_arg_index).ty)) {
3385 // Cast to a generic typaram pointer in order to make a
3386 // type-compatible call.
3387 passed_arg = bcx.build.PointerCast(passed_arg,
3388 T_typaram_ptr(cx.tn));
3390 llargs += passed_arg;
// Advance to the next formal of the outgoing fn type (was += 0u,
// which left the index stuck at the first argument).
3395 outgoing_arg_index += 1u;
3398 // FIXME: turn this call + ret into a tail call.
3399 auto lltargetfn = bcx.build.GEP(lltarget,
3401 C_int(abi.fn_field_code)));
3402 lltargetfn = bcx.build.Load(lltargetfn);
3404 auto r = bcx.build.FastCall(lltargetfn, llargs);
3405 bcx.build.RetVoid();
// Translate a `bind` expression: collect the bound (some) arguments,
// heap-allocate a closure box holding {tydesc, target fn pair, bindings,
// captured ty params}, copy the bound values in, synthesize a thunk via
// trans_bind_thunk, and assemble the resulting fn pair (thunk code ptr +
// closure box ptr) in a stack slot with a drop cleanup.
3410 fn trans_bind(@block_ctxt cx, @ast.expr f,
3411 vec[option.t[@ast.expr]] args,
3412 &ast.ann ann) -> result {
3413 auto f_res = trans_lval(cx, f);
3415 cx.fcx.ccx.sess.unimpl("re-binding existing function");
// Partition: `bound` keeps only the args supplied at bind time.
3417 let vec[@ast.expr] bound = vec();
3419 for (option.t[@ast.expr] argopt in args) {
3421 case (none[@ast.expr]) {
3423 case (some[@ast.expr](?e)) {
3424 append[@ast.expr](bound, e);
3429 // Figure out which tydescs we need to pass, if any.
3430 let @ty.t outgoing_fty;
3431 let vec[ValueRef] lltydescs;
3432 alt (f_res.generic) {
3433 case (none[generic_info]) {
3434 outgoing_fty = ty.expr_ty(f);
3437 case (some[generic_info](?ginfo)) {
3438 outgoing_fty = ginfo.item_type;
3439 lltydescs = ginfo.tydescs;
3442 auto ty_param_count = _vec.len[ValueRef](lltydescs);
3444 if (_vec.len[@ast.expr](bound) == 0u && ty_param_count == 0u) {
3445 // Trivial 'binding': just return the static pair-ptr.
3448 auto bcx = f_res.res.bcx;
3449 auto pair_t = node_type(cx.fcx.ccx, ann);
3450 auto pair_v = bcx.build.Alloca(pair_t);
3452 // Translate the bound expressions.
3453 let vec[@ty.t] bound_tys = vec();
3454 let vec[ValueRef] bound_vals = vec();
3456 for (@ast.expr e in bound) {
3457 auto arg = trans_expr(bcx, e);
3460 append[ValueRef](bound_vals, arg.val);
3461 append[@ty.t](bound_tys, ty.expr_ty(e));
3466 // Get the type of the bound function.
3467 let TypeRef lltarget_ty = type_of(bcx.fcx.ccx, outgoing_fty);
3469 // Synthesize a closure type.
3470 let @ty.t bindings_ty = plain_ty(ty.ty_tup(bound_tys));
3471 let TypeRef llbindings_ty = type_of(bcx.fcx.ccx, bindings_ty);
3472 let TypeRef llclosure_ty = T_closure_ptr(cx.fcx.ccx.tn,
3477 // Malloc a box for the body.
3478 // FIXME: this isn't generic-safe
3479 auto r = trans_raw_malloc(bcx, llclosure_ty,
3480 llsize_of(llvm.LLVMGetElementType(llclosure_ty)));
// Initialize the box's refcount to 1.
3483 auto rc = bcx.build.GEP(box,
3485 C_int(abi.box_rc_field_refcnt)));
3489 C_int(abi.box_rc_field_body)));
3490 bcx.build.Store(C_int(1), rc);
3492 // Store bindings tydesc.
3494 bcx.build.GEP(closure,
3496 C_int(abi.closure_elt_tydesc)));
3497 auto bindings_tydesc = get_tydesc(bcx, bindings_ty);
3498 bcx = bindings_tydesc.bcx;
3499 bcx.build.Store(bindings_tydesc.val, bound_tydesc);
3501 // Store thunk-target.
3503 bcx.build.GEP(closure,
3505 C_int(abi.closure_elt_target)));
3506 auto src = bcx.build.Load(f_res.res.val);
3507 bcx.build.Store(src, bound_target);
3509 // Copy expr values into boxed bindings.
3512 bcx.build.GEP(closure,
3514 C_int(abi.closure_elt_bindings)));
3515 for (ValueRef v in bound_vals) {
3516 auto bound = bcx.build.GEP(bindings,
3517 vec(C_int(0), C_int(i as int)));
// NOTE(review): copy_ty is seeded from r.bcx (the malloc result)
// rather than the threaded bcx; verify against the full source that
// this is intentional and not a stale-context bug.
3518 bcx = copy_ty(r.bcx, INIT, bound, v, bound_tys.(i)).bcx;
3522 // If necessary, copy tydescs describing type parameters into the
3523 // appropriate slot in the closure.
3524 alt (f_res.generic) {
3525 case (none[generic_info]) { /* nothing to do */ }
3526 case (some[generic_info](?ginfo)) {
3527 auto ty_params_slot =
3528 bcx.build.GEP(closure,
3530 C_int(abi.closure_elt_ty_params)));
3532 for (ValueRef td in ginfo.tydescs) {
3533 auto ty_param_slot = bcx.build.GEP(ty_params_slot,
3536 bcx.build.Store(td, ty_param_slot);
3542 // Make thunk and store thunk-ptr in outer pair's code slot.
3543 auto pair_code = bcx.build.GEP(pair_v,
3545 C_int(abi.fn_field_code)));
3547 let @ty.t pair_ty = node_ann_type(cx.fcx.ccx, ann);
3548 let ValueRef llthunk =
3549 trans_bind_thunk(cx.fcx.ccx, pair_ty, outgoing_fty,
3550 args, llclosure_ty, bound_tys,
3553 bcx.build.Store(llthunk, pair_code);
3555 // Store box ptr in outer pair's box slot.
3556 auto pair_box = bcx.build.GEP(pair_v,
3558 C_int(abi.fn_field_box)));
3560 (bcx.build.PointerCast
3562 T_opaque_closure_ptr(bcx.fcx.ccx.tn)),
// The pair is a local value owning the box; drop it on scope exit.
3565 find_scope_cx(cx).cleanups +=
3566 clean(bind drop_slot(_, pair_v, pair_ty));
3568 ret res(bcx, pair_v);
3573 // NB: must keep 4 fns in sync:
3575 //  - type_of_fn_full
3576 //  - create_llargs_for_fn_args.
// Build the full LLVM argument list for a call, following the Rust calling
// convention: (0) return-slot pointer, (1) task pointer, (2) env/self,
// then tydescs, then an optional iter-body pair, then the explicit args.
// Returns (final block ctxt, llargs, return slot).
3580 fn trans_args(@block_ctxt cx,
3582 option.t[ValueRef] llobj,
3583 option.t[generic_info] gen,
3584 option.t[ValueRef] lliterbody,
3587 -> tup(@block_ctxt, vec[ValueRef], ValueRef) {
3589 let vec[ty.arg] args = ty.ty_fn_args(fn_ty);
3590 let vec[ValueRef] llargs = vec();
3591 let vec[ValueRef] lltydescs = vec();
3592 let @block_ctxt bcx = cx;
3595 // Arg 0: Output pointer.
3596 auto retty = ty.ty_fn_ret(fn_ty);
3597 auto llretslot_res = alloc_ty(bcx, retty);
3598 bcx = llretslot_res.bcx;
3599 auto llretslot = llretslot_res.val;
// For a generic callee, switch to the polymorphic item type's view of
// the args and return type, and pick up the caller's tydescs.
3602 case (some[generic_info](?g)) {
3603 lltydescs = g.tydescs;
3604 args = ty.ty_fn_args(g.item_type);
3605 retty = ty.ty_fn_ret(g.item_type);
3610 if (ty.type_has_dynamic_size(retty)) {
3611 llargs += bcx.build.PointerCast(llretslot,
3612 T_typaram_ptr(cx.fcx.ccx.tn));
3613 } else if (ty.count_ty_params(retty) != 0u) {
3614 // It's possible that the callee has some generic-ness somewhere in
3615 // its return value -- say a method signature within an obj or a fn
3616 // type deep in a structure -- which the caller has a concrete view
3617 // of. If so, cast the caller's view of the retslot to the callee's
3618 // view, for the sake of making a type-compatible call.
// NOTE(review): this cast is built on cx.build while the rest of the
// prologue uses bcx.build; verify against the full source whether
// that distinction is intentional.
3619 llargs += cx.build.PointerCast(llretslot,
3620 T_ptr(type_of(bcx.fcx.ccx, retty)));
3622 llargs += llretslot;
3626 // Arg 1: Task pointer.
3627 llargs += bcx.fcx.lltaskptr;
3629 // Arg 2: Env (closure-bindings / self-obj)
3631 case (some[ValueRef](?ob)) {
3632 // Every object is always found in memory,
3633 // and not-yet-loaded (as part of an lval x.y
3634 // dotted method-call).
3635 llargs += bcx.build.Load(ob);
3642 // Args >3: ty_params ...
3643 llargs += lltydescs;
3645 // ... then possibly an lliterbody argument.
3647 case (none[ValueRef]) {}
3648 case (some[ValueRef](?lli)) {
3653 // ... then explicit args.
3655 // First we figure out the caller's view of the types of the arguments.
3656 // This will be needed if this is a generic call, because the callee has
3657 // to cast her view of the arguments to the caller's view.
3658 auto arg_tys = type_of_explicit_args(cx.fcx.ccx, args);
3661 for (@ast.expr e in es) {
3662 auto mode = args.(i).mode;
// Structural values are handled by pointer internally; by-value args
// must be loaded, alias args keep the address.
3665 if (ty.type_is_structural(ty.expr_ty(e))) {
3666 auto re = trans_expr(bcx, e);
3669 if (mode == ast.val) {
3670 // Until here we've been treating structures by pointer;
3671 // we are now passing it as an arg, so need to load it.
3672 val = bcx.build.Load(val);
3674 } else if (mode == ast.alias) {
3676 if (ty.is_lval(e)) {
3677 lv = trans_lval(bcx, e);
3679 auto r = trans_expr(bcx, e);
3680 lv = lval_val(r.bcx, r.val);
3687 // Non-mem but we're trying to alias; synthesize an
3688 // alloca, spill to it and pass its address.
3689 auto llty = val_ty(lv.res.val);
3690 auto llptr = lv.res.bcx.build.Alloca(llty);
3691 lv.res.bcx.build.Store(lv.res.val, llptr);
3696 auto re = trans_expr(bcx, e);
// Generic formal: cast the caller's concrete arg to the callee's view.
3701 if (ty.count_ty_params(args.(i).ty) > 0u) {
3702 auto lldestty = arg_tys.(i);
3703 val = bcx.build.PointerCast(val, lldestty);
3710 ret tup(bcx, llargs, llretslot);
// Translate a function call: resolve the callee lval, extract the code
// pointer and env (a vtbl entry for method calls, a fn pair otherwise),
// marshal arguments with trans_args, emit the fastcall, and load the
// result out of the return slot (registering a drop cleanup for it).
3713 fn trans_call(@block_ctxt cx, @ast.expr f,
3714 option.t[ValueRef] lliterbody,
3715 vec[@ast.expr] args,
3716 &ast.ann ann) -> result {
3717 auto f_res = trans_lval(cx, f);
3718 auto faddr = f_res.res.val;
3719 auto llenv = C_null(T_opaque_closure_ptr(cx.fcx.ccx.tn));
3722 case (some[ValueRef](_)) {
3723 // It's a vtbl entry.
3724 faddr = f_res.res.bcx.build.Load(faddr);
3726 case (none[ValueRef]) {
// Ordinary call: split the fn pair into its code and box fields.
3728 auto bcx = f_res.res.bcx;
3730 faddr = bcx.build.GEP(pair, vec(C_int(0),
3731 C_int(abi.fn_field_code)));
3732 faddr = bcx.build.Load(faddr);
3734 auto llclosure = bcx.build.GEP(pair,
3736 C_int(abi.fn_field_box)));
3737 llenv = bcx.build.Load(llclosure);
3740 auto fn_ty = ty.expr_ty(f);
3741 auto ret_ty = ty.ann_to_type(ann);
3742 auto args_res = trans_args(f_res.res.bcx,
3748 auto bcx = args_res._0;
3749 auto llargs = args_res._1;
3750 auto llretslot = args_res._2;
// Debug logging of the callee and each marshaled argument.
3753 log "calling: " + val_str(cx.fcx.ccx.tn, faddr);
3755 for (ValueRef arg in llargs) {
3756 log "arg: " + val_str(cx.fcx.ccx.tn, arg);
3760 bcx.build.FastCall(faddr, llargs);
3761 auto retval = C_nil();
3763 if (!ty.type_is_nil(ret_ty)) {
3764 retval = load_scalar_or_boxed(bcx, llretslot, ret_ty);
3765 // Retval doesn't correspond to anything really tangible in the frame,
3766 // but it's a ref all the same, so we put a note here to drop it when
3767 // we're done in this scope.
3768 find_scope_cx(cx).cleanups += clean(bind drop_ty(_, retval, ret_ty));
3771 ret res(bcx, retval);
// Translate a tuple constructor: allocate a slot for the tuple type,
// register a drop cleanup, then translate and copy each element into its
// GEP'd field in order.
3774 fn trans_tup(@block_ctxt cx, vec[ast.elt] elts,
3775 &ast.ann ann) -> result {
3777 auto t = node_ann_type(bcx.fcx.ccx, ann);
3778 auto tup_res = alloc_ty(bcx, t);
3779 auto tup_val = tup_res.val;
3782 find_scope_cx(cx).cleanups += clean(bind drop_ty(_, tup_val, t));
3785 for (ast.elt e in elts) {
3786 auto e_ty = ty.expr_ty(e.expr);
3787 auto src_res = trans_expr(bcx, e.expr);
3789 auto dst_res = GEP_tup_like(bcx, t, tup_val, vec(0, i));
3791 bcx = copy_ty(src_res.bcx, INIT, dst_res.val, src_res.val, e_ty).bcx;
3794 ret res(bcx, tup_val);
// Translate a vector literal: compute the total data size, allocate the
// vec via the upcall_new_vec runtime upcall, copy each element into the
// data area (treated as a synthetic tuple of uniform element type), and
// set the fill field to the byte length.
3797 fn trans_vec(@block_ctxt cx, vec[@ast.expr] args,
3798 &ast.ann ann) -> result {
3799 auto t = node_ann_type(cx.fcx.ccx, ann);
3802 case (ty.ty_vec(?t)) {
3806 cx.fcx.ccx.sess.bug("non-vec type in trans_vec");
3810 auto llunit_ty = type_of(cx.fcx.ccx, unit_ty);
3812 auto unit_sz = size_of(bcx, unit_ty);
// data_sz = element count * element size, folded as an LLVM constant.
3814 auto data_sz = llvm.LLVMConstMul(C_int(_vec.len[@ast.expr](args) as int),
3817 // FIXME: pass tydesc properly.
3818 auto sub = trans_upcall(bcx, "upcall_new_vec", vec(data_sz, C_int(0)));
3821 auto llty = type_of(bcx.fcx.ccx, t);
3822 auto vec_val = vi2p(bcx, sub.val, llty);
3823 find_scope_cx(bcx).cleanups += clean(bind drop_ty(_, vec_val, t));
3825 auto body = bcx.build.GEP(vec_val, vec(C_int(0),
3826 C_int(abi.vec_elt_data)));
// View the data area as a tuple of N copies of the element type so
// GEP_tup_like can address each slot uniformly.
3828 auto pseudo_tup_ty =
3829 plain_ty(ty.ty_tup(_vec.init_elt[@ty.t](unit_ty,
3830 _vec.len[@ast.expr](args))));
3833 for (@ast.expr e in args) {
3834 auto src_res = trans_expr(bcx, e);
3836 auto dst_res = GEP_tup_like(bcx, pseudo_tup_ty, body, vec(0, i));
3838 bcx = copy_ty(bcx, INIT, dst_res.val, src_res.val, unit_ty).bcx;
3841 auto fill = bcx.build.GEP(vec_val,
3842 vec(C_int(0), C_int(abi.vec_elt_fill)));
3843 bcx.build.Store(data_sz, fill);
3845 ret res(bcx, vec_val);
// Translate a record constructor, with optional functional update
// (`rec(f=x with base)`): allocate the record, then for each field of the
// record type either copy the explicitly-provided expression or load the
// corresponding field out of the base value.
3848 fn trans_rec(@block_ctxt cx, vec[ast.field] fields,
3849 option.t[@ast.expr] base, &ast.ann ann) -> result {
3852 auto t = node_ann_type(bcx.fcx.ccx, ann);
3853 auto llty = type_of(bcx.fcx.ccx, t);
3854 auto rec_res = alloc_ty(bcx, t);
3855 auto rec_val = rec_res.val;
3858 find_scope_cx(cx).cleanups += clean(bind drop_ty(_, rec_val, t));
// Evaluate the `with`-base (if any) once, up front.
3861 auto base_val = C_nil();
3864 case (none[@ast.expr]) { }
3865 case (some[@ast.expr](?bexp)) {
3866 auto base_res = trans_expr(bcx, bexp);
3868 base_val = base_res.val;
3872 let vec[ty.field] ty_fields = vec();
3874 case (ty.ty_rec(?flds)) { ty_fields = flds; }
3877 for (ty.field tf in ty_fields) {
3879 auto dst_res = GEP_tup_like(bcx, t, rec_val, vec(0, i));
// Scan the explicit field list for this type-field; fall back to the
// base value when the field was not spelled out.
3882 auto expr_provided = false;
3883 auto src_res = res(bcx, C_nil());
3885 for (ast.field f in fields) {
3886 if (_str.eq(f.ident, tf.ident)) {
3887 expr_provided = true;
3888 src_res = trans_expr(bcx, f.expr);
3891 if (!expr_provided) {
3892 src_res = GEP_tup_like(bcx, t, base_val, vec(0, i));
3893 src_res = res(src_res.bcx,
3894 load_scalar_or_boxed(bcx, src_res.val, e_ty));
3898 bcx = copy_ty(bcx, INIT, dst_res.val, src_res.val, e_ty).bcx;
3901 ret res(bcx, rec_val);
// Main expression-translation dispatcher: one case per AST expression
// variant, each delegating to its dedicated trans_* helper. Lval-shaped
// expressions (path/field/index) fall through to trans_lval at the bottom
// and load the result when it is a scalar or box.
3906 fn trans_expr(@block_ctxt cx, @ast.expr e) -> result {
3908 case (ast.expr_lit(?lit, ?ann)) {
3909 ret res(cx, trans_lit(cx.fcx.ccx, *lit, ann));
3912 case (ast.expr_unary(?op, ?x, ?ann)) {
3913 ret trans_unary(cx, op, x, ann);
3916 case (ast.expr_binary(?op, ?x, ?y, _)) {
3917 ret trans_binary(cx, op, x, y);
3920 case (ast.expr_if(?cond, ?thn, ?els, _)) {
3921 ret trans_if(cx, cond, thn, els);
3924 case (ast.expr_for(?decl, ?seq, ?body, _)) {
3925 ret trans_for(cx, decl, seq, body);
3928 case (ast.expr_for_each(?decl, ?seq, ?body, _)) {
3929 ret trans_for_each(cx, decl, seq, body);
3932 case (ast.expr_while(?cond, ?body, _)) {
3933 ret trans_while(cx, cond, body);
3936 case (ast.expr_do_while(?body, ?cond, _)) {
3937 ret trans_do_while(cx, body, cond);
3940 case (ast.expr_alt(?expr, ?arms, _)) {
3941 ret trans_alt(cx, expr, arms);
// A block expression gets its own scope block, entered and exited
// with explicit branches so its cleanups run on the way out.
3944 case (ast.expr_block(?blk, _)) {
3945 auto sub_cx = new_scope_block_ctxt(cx, "block-expr body");
3946 auto next_cx = new_sub_block_ctxt(cx, "next");
3947 auto sub = trans_block(sub_cx, blk);
3949 cx.build.Br(sub_cx.llbb);
3950 sub.bcx.build.Br(next_cx.llbb);
3952 ret res(next_cx, sub.val);
3955 case (ast.expr_assign(?dst, ?src, ?ann)) {
3956 auto lhs_res = trans_lval(cx, dst);
3957 check (lhs_res.is_mem);
3958 auto rhs_res = trans_expr(lhs_res.res.bcx, src);
3959 auto t = node_ann_type(cx.fcx.ccx, ann);
3960 // FIXME: calculate copy init-ness in typestate.
3961 ret copy_ty(rhs_res.bcx, DROP_EXISTING,
3962 lhs_res.res.val, rhs_res.val, t);
// Compound assignment: load lhs, apply the eager binop, store back.
3965 case (ast.expr_assign_op(?op, ?dst, ?src, ?ann)) {
3966 auto t = node_ann_type(cx.fcx.ccx, ann);
3967 auto lhs_res = trans_lval(cx, dst);
3968 check (lhs_res.is_mem);
3969 auto lhs_val = load_scalar_or_boxed(lhs_res.res.bcx,
3970 lhs_res.res.val, t);
3971 auto rhs_res = trans_expr(lhs_res.res.bcx, src);
3972 auto v = trans_eager_binop(rhs_res.bcx, op, t,
3973 lhs_val, rhs_res.val);
3974 // FIXME: calculate copy init-ness in typestate.
3975 ret copy_ty(v.bcx, DROP_EXISTING,
3976 lhs_res.res.val, v.val, t);
3979 case (ast.expr_bind(?f, ?args, ?ann)) {
3980 ret trans_bind(cx, f, args, ann);
3983 case (ast.expr_call(?f, ?args, ?ann)) {
3984 ret trans_call(cx, f, none[ValueRef], args, ann);
3987 case (ast.expr_cast(?e, _, ?ann)) {
3988 ret trans_cast(cx, e, ann);
3991 case (ast.expr_vec(?args, ?ann)) {
3992 ret trans_vec(cx, args, ann);
3995 case (ast.expr_tup(?args, ?ann)) {
3996 ret trans_tup(cx, args, ann);
3999 case (ast.expr_rec(?args, ?base, ?ann)) {
4000 ret trans_rec(cx, args, base, ann);
// Syntax extensions are translated via their pre-expanded form.
4003 case (ast.expr_ext(_, _, _, ?expanded, _)) {
4004 ret trans_expr(cx, expanded);
4007 case (ast.expr_fail) {
4008 ret trans_fail(cx, e.span, "explicit failure");
4011 case (ast.expr_log(?a)) {
4012 ret trans_log(cx, a);
4015 case (ast.expr_check_expr(?a)) {
4016 ret trans_check_expr(cx, a);
4019 case (ast.expr_ret(?e)) {
4020 ret trans_ret(cx, e);
4023 case (ast.expr_put(?e)) {
4024 ret trans_put(cx, e);
4027 case (ast.expr_be(?e)) {
4028 ret trans_be(cx, e);
4031 // lval cases fall through to trans_lval and then
4032 // possibly load the result (if it's non-structural).
4035 auto t = ty.expr_ty(e);
4036 auto sub = trans_lval(cx, e);
4037 ret res(sub.res.bcx,
4038 load_scalar_or_boxed(sub.res.bcx, sub.res.val, t));
4041 cx.fcx.ccx.sess.unimpl("expr variant in trans_expr");
4045 // We pass structural values around the compiler "by pointer" and
4046 // non-structural values (scalars and boxes) "by value". This function selects
4047 // whether to load a pointer or pass it.
// NOTE(review): the non-scalar branch (presumably `ret v;` unchanged) is
// elided from this view — confirm against the full file.
4049 fn load_scalar_or_boxed(@block_ctxt cx,
4051 @ty.t t) -> ValueRef {
4052 if (ty.type_is_scalar(t) || ty.type_is_boxed(t) || ty.type_is_native(t)) {
4053 ret cx.build.Load(v);
// Translates a `log` expression: evaluate the operand, then emit an upcall
// into the runtime with the value (pointer values are cast to int via vp2i).
// The two trans_upcall returns appear to select different upcalls based on
// the operand type — the discriminating condition is elided from this view.
4059 fn trans_log(@block_ctxt cx, @ast.expr e) -> result {
4061 auto sub = trans_expr(cx, e);
4062 auto e_ty = ty.expr_ty(e);
4065 auto v = vp2i(sub.bcx, sub.val);
4066 ret trans_upcall(sub.bcx,
4071 ret trans_upcall(sub.bcx,
// Translates a `check` expression: evaluate the condition, branch to a
// "fail" block (which calls the failure upcall) when false, otherwise
// continue in the "next" block. Yields nil.
4079 fn trans_check_expr(@block_ctxt cx, @ast.expr e) -> result {
4080 auto cond_res = trans_expr(cx, e);
4082 // FIXME: need pretty-printer.
4083 auto expr_str = "<expr>";
4084 auto fail_cx = new_sub_block_ctxt(cx, "fail");
4085 auto fail_res = trans_fail(fail_cx, e.span, expr_str);
4087 auto next_cx = new_sub_block_ctxt(cx, "next");
4088 fail_res.bcx.build.Br(next_cx.llbb);
4089 cond_res.bcx.build.CondBr(cond_res.val,
4092 ret res(next_cx, C_nil());
// Emits a call to the runtime's upcall_fail with the failure message, the
// source filename and line number (all passed as ints via p2i/C_int).
4095 fn trans_fail(@block_ctxt cx, common.span sp, str fail_str) -> result {
4096 auto V_fail_str = p2i(C_cstr(cx.fcx.ccx, fail_str));
4097 auto V_filename = p2i(C_cstr(cx.fcx.ccx, sp.filename));
4098 auto V_line = sp.lo.line as int;
4099 auto args = vec(V_fail_str, V_filename, C_int(V_line));
4101 ret trans_upcall(cx, "upcall_fail", args);
// Translates a `put` inside an iterator body: extracts the iter-body
// closure (code ptr + environment box) from fcx.lliterbody, spills it to a
// stack slot so it can be GEPed, optionally evaluates the put operand, and
// FastCalls the body. A dummy nil retslot is passed since put yields nothing.
4104 fn trans_put(@block_ctxt cx, &option.t[@ast.expr] e) -> result {
4105 auto llcallee = C_nil();
4106 auto llenv = C_nil();
4108 alt (cx.fcx.lliterbody) {
4109 case (some[ValueRef](?lli)) {
// Spill the fn-pair into an alloca so we can GEP at its fields.
4110 auto slot = cx.build.Alloca(val_ty(lli));
4111 cx.build.Store(lli, slot);
4113 llcallee = cx.build.GEP(slot, vec(C_int(0),
4114 C_int(abi.fn_field_code)));
4115 llcallee = cx.build.Load(llcallee);
4117 llenv = cx.build.GEP(slot, vec(C_int(0),
4118 C_int(abi.fn_field_box)));
4119 llenv = cx.build.Load(llenv);
4123 auto dummy_retslot = bcx.build.Alloca(T_nil());
4124 let vec[ValueRef] llargs = vec(dummy_retslot, cx.fcx.lltaskptr, llenv);
4126 case (none[@ast.expr]) { }
4127 case (some[@ast.expr](?x)) {
// `put x;` — evaluate the operand; the code appending its value to
// llargs is elided from this view.
4128 auto r = trans_expr(bcx, x);
4133 ret res(bcx, bcx.build.FastCall(llcallee, llargs));
// Translates `ret`/`ret expr`: copies the return value (if any) into the
// caller-provided retptr, then walks the block-ctxt parent chain running
// every scope's cleanups before emitting RetVoid. Functions return through
// the out-pointer, so the LLVM return itself is void.
4136 fn trans_ret(@block_ctxt cx, &option.t[@ast.expr] e) -> result {
4141 case (some[@ast.expr](?x)) {
4142 auto t = ty.expr_ty(x);
4143 auto r = trans_expr(cx, x);
4146 bcx = copy_ty(bcx, INIT, cx.fcx.llretptr, val, t).bcx;
4148 case (_) { /* fall through */ }
4151 // Run all cleanups and back out.
4152 let bool more_cleanups = true;
4153 auto cleanup_cx = cx;
4154 while (more_cleanups) {
4155 bcx = trans_block_cleanups(bcx, cleanup_cx);
4156 alt (cleanup_cx.parent) {
4157 case (parent_some(?b)) {
4160 case (parent_none) {
4161 more_cleanups = false;
4166 bcx.build.RetVoid();
4167 ret res(bcx, C_nil());
// Translates `be expr` (tail call). Currently lowered as an ordinary
// return of the call's result rather than a true tail call (see FIXME).
4170 fn trans_be(@block_ctxt cx, @ast.expr e) -> result {
4171 // FIXME: This should be a typestate precondition
4172 check (ast.is_call_expr(e));
4173 // FIXME: Turn this into a real tail call once
4174 // calling convention issues are settled
4175 ret trans_ret(cx, some(e));
// Initializes a local's pre-allocated stack slot: registers a drop cleanup
// on the enclosing scope, then either copies an initializer expression into
// the slot or zeroes it (bzero for dynamically-sized types, ConstNull store
// for statically-sized ones).
4178 fn init_local(@block_ctxt cx, @ast.local local) -> result {
4180 // Make a note to drop this slot on the way out.
4181 check (cx.fcx.lllocals.contains_key(local.id));
4182 auto llptr = cx.fcx.lllocals.get(local.id);
4183 auto ty = node_ann_type(cx.fcx.ccx, local.ann);
4186 find_scope_cx(cx).cleanups +=
4187 clean(bind drop_slot(_, llptr, ty));
4190 case (some[@ast.expr](?e)) {
4191 auto sub = trans_expr(bcx, e);
4192 bcx = copy_ty(sub.bcx, INIT, llptr, sub.val, ty).bcx;
4195 if (middle.ty.type_has_dynamic_size(ty)) {
// Dynamically-sized: compute runtime size and bzero the slot.
4196 auto llsz = size_of(bcx, ty);
4197 bcx = call_bzero(llsz.bcx, llptr, llsz.val).bcx;
4200 auto llty = type_of(bcx.fcx.ccx, ty);
4201 auto null = lib.llvm.llvm.LLVMConstNull(llty);
4202 bcx.build.Store(null, llptr);
4206 ret res(bcx, llptr);
// Translates one statement: expression statements via trans_expr, local
// declarations via init_local, nested item declarations via trans_item.
// Unknown statement variants are reported as unimplemented.
4209 fn trans_stmt(@block_ctxt cx, &ast.stmt s) -> result {
4212 case (ast.stmt_expr(?e)) {
4213 bcx = trans_expr(cx, e).bcx;
4216 case (ast.stmt_decl(?d)) {
4218 case (ast.decl_local(?local)) {
4219 bcx = init_local(bcx, local).bcx;
4221 case (ast.decl_item(?i)) {
4222 trans_item(cx.fcx.ccx, *i);
4227 cx.fcx.ccx.sess.unimpl("stmt variant");
4230 ret res(bcx, C_nil());
// Creates an LLVM IRBuilder positioned at the end of the given basic block.
4233 fn new_builder(BasicBlockRef llbb) -> builder {
4234 let BuilderRef llbuild = llvm.LLVMCreateBuilder();
4235 llvm.LLVMPositionBuilderAtEnd(llbuild, llbb);
4236 ret builder(llbuild);
4239 // You probably don't want to use this one. See the
4240 // next three functions instead.
// Raw block-ctxt constructor: appends a fresh named basic block to the
// current function and wraps it with a builder, parent link and an empty
// cleanup list. Use the three specialized constructors below instead.
4241 fn new_block_ctxt(@fn_ctxt cx, block_parent parent,
4243 str name) -> @block_ctxt {
4244 let vec[cleanup] cleanups = vec();
4245 let BasicBlockRef llbb =
4246 llvm.LLVMAppendBasicBlock(cx.llfn,
4247 _str.buf(cx.ccx.names.next(name)));
4250 build=new_builder(llbb),
4253 mutable cleanups=cleanups,
4257 // Use this when you're at the top block of a function or the like.
4258 fn new_top_block_ctxt(@fn_ctxt fcx) -> @block_ctxt {
4259 auto cx = new_block_ctxt(fcx, parent_none, SCOPE_BLOCK,
4260 "function top level");
4262 // FIXME: hack to give us some spill room to make up for an LLVM
4263 // bug where it destroys its own callee-saves.
4264 cx.build.Alloca(T_array(T_int(), 10u));
4268 // Use this when you're at a curly-brace or similar lexical scope.
4269 fn new_scope_block_ctxt(@block_ctxt bcx, str n) -> @block_ctxt {
4270 ret new_block_ctxt(bcx.fcx, parent_some(bcx), SCOPE_BLOCK, n);
4273 // Use this when you're making a general CFG BB within a scope.
4274 fn new_sub_block_ctxt(@block_ctxt bcx, str n) -> @block_ctxt {
4275 ret new_block_ctxt(bcx.fcx, parent_some(bcx), NON_SCOPE_BLOCK, n);
// Runs the registered cleanups of `cleanup_cx` in the current block `cx`.
// Non-scope blocks must have no cleanups; cleanups appear to be iterated
// from the end of the list (index starts at len — decrement elided here).
4279 fn trans_block_cleanups(@block_ctxt cx,
4280 @block_ctxt cleanup_cx) -> @block_ctxt {
4283 if (cleanup_cx.kind != SCOPE_BLOCK) {
4284 check (_vec.len[cleanup](cleanup_cx.cleanups) == 0u);
4287 auto i = _vec.len[cleanup](cleanup_cx.cleanups);
4290 auto c = cleanup_cx.cleanups.(i);
4292 case (clean(?cfn)) {
// Iterator over the locals declared directly in a block's statements.
4300 iter block_locals(&ast.block b) -> @ast.local {
4301 // FIXME: putting from inside an iter block doesn't work, so we can't
4302 // use the index here.
4303 for (@ast.stmt s in b.node.stmts) {
4305 case (ast.stmt_decl(?d)) {
4307 case (ast.decl_local(?local)) {
4310 case (_) { /* fall through */ }
4313 case (_) { /* fall through */ }
// Allocates a stack slot for a value of type t: a byte-array alloca sized
// at runtime for dynamically-sized types, a typed alloca otherwise.
4318 fn alloc_ty(@block_ctxt cx, @ty.t t) -> result {
4319 auto val = C_int(0);
4321 if (ty.type_has_dynamic_size(t)) {
4322 auto n = size_of(bcx, t);
4324 val = bcx.build.ArrayAlloca(T_i8(), n.val);
4326 val = bcx.build.Alloca(type_of(cx.fcx.ccx, t));
// Allocates the slot for a local declaration and records it in the
// function ctxt's lllocals map keyed by the local's def id.
4331 fn alloc_local(@block_ctxt cx, @ast.local local) -> result {
4332 auto t = node_ann_type(cx.fcx.ccx, local.ann);
4333 auto r = alloc_ty(cx, t);
4334 r.bcx.fcx.lllocals.insert(local.id, r.val);
// Translates a block: pre-allocate all locals, translate each statement
// (stopping early at a terminator — dead code beyond it is skipped),
// translate the optional tail expression, then run this scope's cleanups.
4338 fn trans_block(@block_ctxt cx, &ast.block b) -> result {
4341 for each (@ast.local local in block_locals(b)) {
4342 bcx = alloc_local(bcx, local).bcx;
4344 auto r = res(bcx, C_nil());
4346 for (@ast.stmt s in b.node.stmts) {
4347 r = trans_stmt(bcx, *s);
4349 // If we hit a terminator, control won't go any further so
4350 // we're in dead-code land. Stop here.
4351 if (is_terminated(bcx)) {
4357 case (some[@ast.expr](?e)) {
4358 r = trans_expr(bcx, e);
4360 if (is_terminated(bcx)) {
4364 case (none[@ast.expr]) {
4365 r = res(bcx, C_nil());
4369 bcx = trans_block_cleanups(bcx, find_scope_cx(bcx));
4370 ret res(bcx, r.val);
4373 // NB: must keep 4 fns in sync:
4375 // - type_of_fn_full
4376 // - create_llargs_for_fn_args.
// Builds a fresh fn_ctxt for a declared function. The first three LLVM
// params are fixed by the calling convention: 0 = return-value out-pointer,
// 1 = task pointer, 2 = closure/environment pointer. All the per-def-id
// maps (args, obj fields, locals, tydescs) start empty.
4380 fn new_fn_ctxt(@crate_ctxt cx,
4381 ValueRef llfndecl) -> @fn_ctxt {
4383 let ValueRef llretptr = llvm.LLVMGetParam(llfndecl, 0u);
4384 let ValueRef lltaskptr = llvm.LLVMGetParam(llfndecl, 1u);
4385 let ValueRef llenv = llvm.LLVMGetParam(llfndecl, 2u);
4387 let hashmap[ast.def_id, ValueRef] llargs = new_def_hash[ValueRef]();
4388 let hashmap[ast.def_id, ValueRef] llobjfields = new_def_hash[ValueRef]();
4389 let hashmap[ast.def_id, ValueRef] lllocals = new_def_hash[ValueRef]();
4390 let hashmap[ast.def_id, ValueRef] lltydescs = new_def_hash[ValueRef]();
4392 ret @rec(llfn=llfndecl,
4393 lltaskptr=lltaskptr,
4396 mutable llself=none[ValueRef],
4397 mutable lliterbody=none[ValueRef],
4399 llobjfields=llobjfields,
4401 lltydescs=lltydescs,
4405 // NB: must keep 4 fns in sync:
4407 // - type_of_fn_full
4408 // - create_llargs_for_fn_args.
// Walks the function's LLVM parameters (after the three fixed ones) and
// records them in the fn_ctxt maps: the env param doubles as `self` for
// methods; then one tydesc param per type parameter (skipped for methods,
// which get typarams via self); then the iter-body closure for iters; then
// the declared arguments. `arg_n` tracks the running LLVM param index.
4412 fn create_llargs_for_fn_args(&@fn_ctxt cx,
4414 option.t[TypeRef] ty_self,
4417 &vec[ast.ty_param] ty_params) {
4420 case (some[TypeRef](_)) {
4421 cx.llself = some[ValueRef](cx.llenv);
4429 if (ty_self == none[TypeRef]) {
4430 for (ast.ty_param tp in ty_params) {
4431 auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
4432 check (llarg as int != 0);
4433 cx.lltydescs.insert(tp.id, llarg);
4438 if (proto == ast.proto_iter) {
4439 auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
4440 check (llarg as int != 0);
4441 cx.lliterbody = some[ValueRef](llarg);
4445 for (ast.arg arg in args) {
4446 auto llarg = llvm.LLVMGetParam(cx.llfn, arg_n);
4447 check (llarg as int != 0);
4448 cx.llargs.insert(arg.id, llarg);
4453 // Recommended LLVM style, strange though this is, is to copy from args to
4454 // allocas immediately upon entry; this permits us to GEP into structures we
4455 // were passed and whatnot. Apparently mem2reg will mop up.
// Spills by-value (non-alias) arguments — and self, when present — into
// entry-block allocas, rewriting the fn_ctxt maps to point at the allocas.
4457 fn copy_args_to_allocas(@block_ctxt cx,
4458 option.t[TypeRef] ty_self,
4460 vec[ty.arg] arg_tys) {
4462 let uint arg_n = 0u;
4464 alt (cx.fcx.llself) {
4465 case (some[ValueRef](?self_v)) {
4467 case (some[TypeRef](?self_t)) {
4468 auto alloca = cx.build.Alloca(self_t);
4469 cx.build.Store(self_v, alloca);
4470 cx.fcx.llself = some[ValueRef](alloca);
4478 for (ast.arg aarg in args) {
// Alias (by-reference) args are already addressable; skip them.
4479 if (aarg.mode != ast.alias) {
4480 auto arg_t = type_of_arg(cx.fcx.ccx, arg_tys.(arg_n));
4481 auto alloca = cx.build.Alloca(arg_t);
4482 auto argval = cx.fcx.llargs.get(aarg.id);
4483 cx.build.Store(argval, alloca);
4484 // Overwrite the llargs entry for this arg with its alloca.
4485 cx.fcx.llargs.insert(aarg.id, alloca);
// True when the block's last instruction is a terminator (Br, Ret, etc.),
// i.e. no more instructions may be appended to it.
4492 fn is_terminated(@block_ctxt cx) -> bool {
4493 auto inst = llvm.LLVMGetLastInstruction(cx.llbb);
4494 ret llvm.LLVMIsATerminatorInst(inst) as int != 0;
// Extracts the argument types from a function item's type annotation.
4497 fn arg_tys_of_fn(ast.ann ann) -> vec[ty.arg] {
4498 alt (ty.ann_to_type(ann).struct) {
4499 case (ty.ty_fn(_, ?arg_tys, _)) {
// Extracts the return type from a ty_fn type.
4506 fn ret_ty_of_fn_ty(@ty.t t) -> @ty.t {
4508 case (ty.ty_fn(_, _, ?ret_ty)) {
// Convenience: return type straight from an annotation.
4516 fn ret_ty_of_fn(ast.ann ann) -> @ty.t {
4517 ret ret_ty_of_fn_ty(ty.ann_to_type(ann));
// For an obj method: given the `self` pair, digs into the obj body box to
// recover (a) the captured type-parameter tydescs, inserted into
// fcx.lltydescs, and (b) pointers to each obj field, inserted into
// fcx.llobjfields. A synthetic tuple type over the field types lets
// GEP_tup_like compute field offsets even for dynamically-sized fields.
4520 fn populate_fn_ctxt_from_llself(@block_ctxt cx, ValueRef llself) -> result {
4523 let vec[@ty.t] field_tys = vec();
4525 for (ast.obj_field f in bcx.fcx.ccx.obj_fields) {
4526 field_tys += vec(node_ann_type(bcx.fcx.ccx, f.ann));
4529 // Synthesize a tuple type for the fields so that GEP_tup_like() can work
4531 auto fields_tup_ty = ty.plain_ty(ty.ty_tup(field_tys));
4533 auto n_typarams = _vec.len[ast.ty_param](bcx.fcx.ccx.obj_typarams);
4534 let TypeRef llobj_box_ty = T_obj_ptr(bcx.fcx.ccx.tn, n_typarams);
4537 bcx.build.GEP(llself,
4539 C_int(abi.obj_field_box)));
4541 auto box_ptr = bcx.build.Load(box_cell);
4543 box_ptr = bcx.build.PointerCast(box_ptr, llobj_box_ty);
4545 auto obj_typarams = bcx.build.GEP(box_ptr,
4547 C_int(abi.box_rc_field_body),
4548 C_int(abi.obj_body_elt_typarams)));
4550 // The object fields immediately follow the type parameters, so we skip
4551 // over them to get the pointer.
4552 auto obj_fields = bcx.build.Add(vp2i(bcx, obj_typarams),
4553 llsize_of(llvm.LLVMGetElementType(val_ty(obj_typarams))));
4555 // If we can (i.e. the type is statically sized), then cast the resulting
4556 // fields pointer to the appropriate LLVM type. If not, just leave it as
4558 if (!ty.type_has_dynamic_size(fields_tup_ty)) {
4559 auto llfields_ty = type_of(bcx.fcx.ccx, fields_tup_ty);
4560 obj_fields = vi2p(bcx, obj_fields, T_ptr(llfields_ty));
4562 obj_fields = vi2p(bcx, obj_fields, T_ptr(T_i8()));
// Load each captured tydesc and record it under its ty-param def id.
4568 for (ast.ty_param p in bcx.fcx.ccx.obj_typarams) {
4569 let ValueRef lltyparam = bcx.build.GEP(obj_typarams,
4572 lltyparam = bcx.build.Load(lltyparam);
4573 bcx.fcx.lltydescs.insert(p.id, lltyparam);
// Record a pointer to each obj field, computed via the synthetic tuple.
4578 for (ast.obj_field f in bcx.fcx.ccx.obj_fields) {
4579 auto rslt = GEP_tup_like(bcx, fields_tup_ty, obj_fields, vec(0, i));
4581 auto llfield = rslt.val;
4582 cx.fcx.llobjfields.insert(f.id, llfield);
4586 ret res(bcx, C_nil());
// Translates a function item into its previously-declared LLVM function:
// builds the fn_ctxt, wires up LLVM params, spills args to allocas,
// populates method state from self if present, translates the body, and
// appends a RetVoid when the body didn't already terminate.
4589 fn trans_fn(@crate_ctxt cx, &ast._fn f, ast.def_id fid,
4590 option.t[TypeRef] ty_self,
4591 &vec[ast.ty_param] ty_params, &ast.ann ann) {
4593 auto llfndecl = cx.item_ids.get(fid);
4594 cx.item_names.insert(cx.path, llfndecl);
4596 auto fcx = new_fn_ctxt(cx, llfndecl);
4597 create_llargs_for_fn_args(fcx, f.proto,
4598 ty_self, ret_ty_of_fn(ann),
4599 f.decl.inputs, ty_params);
4600 auto bcx = new_top_block_ctxt(fcx);
4602 copy_args_to_allocas(bcx, ty_self, f.decl.inputs,
4603 arg_tys_of_fn(ann));
4606 case (some[ValueRef](?llself)) {
4607 bcx = populate_fn_ctxt_from_llself(bcx, llself).bcx;
4613 auto res = trans_block(bcx, f.body);
4614 if (!is_terminated(res.bcx)) {
4615 // FIXME: until LLVM has a unit type, we are moving around
4616 // C_nil values rather than their void type.
4617 res.bcx.build.RetVoid();
// Builds an object's vtable: sorts the methods by name (so slot order is
// deterministic), translates each method body, collects the method fn
// pointers into a constant struct, and emits it as a private global.
4621 fn trans_vtbl(@crate_ctxt cx, TypeRef self_ty,
4623 &vec[ast.ty_param] ty_params) -> ValueRef {
4624 let vec[ValueRef] methods = vec();
4626 fn meth_lteq(&@ast.method a, &@ast.method b) -> bool {
4627 ret _str.lteq(a.node.ident, b.node.ident);
4630 auto meths = std.sort.merge_sort[@ast.method](bind meth_lteq(_,_),
4633 for (@ast.method m in meths) {
4635 auto llfnty = T_nil();
4636 alt (node_ann_type(cx, m.node.ann).struct) {
4637 case (ty.ty_fn(?proto, ?inputs, ?output)) {
4638 llfnty = type_of_fn_full(cx, proto,
4639 some[TypeRef](self_ty),
// Each method gets a sub-ctxt whose path includes the method name.
4644 let @crate_ctxt mcx = @rec(path=cx.path + sep() + m.node.ident
4647 let str s = cx.names.next("_rust_method") + sep() + mcx.path;
4648 let ValueRef llfn = decl_fastcall_fn(cx.llmod, s, llfnty);
4649 cx.item_ids.insert(m.node.id, llfn);
4651 trans_fn(mcx, m.node.meth, m.node.id, some[TypeRef](self_ty),
4652 ty_params, m.node.ann);
4655 auto vtbl = C_struct(methods);
4656 auto gvar = llvm.LLVMAddGlobal(cx.llmod,
4658 _str.buf("_rust_vtbl" + sep() + cx.path));
4659 llvm.LLVMSetInitializer(gvar, vtbl);
4660 llvm.LLVMSetGlobalConstant(gvar, True);
4661 llvm.LLVMSetLinkage(gvar, lib.llvm.LLVMPrivateLinkage
// Translates an obj item's constructor. The ctor takes the obj fields as
// alias args and fills in the caller's return slot, which is a
// (vtbl, body-box) pair. If there are no typarams and no fields, the box
// slot is just null; otherwise a refcounted body box is malloc'd holding
// (tydesc, captured typaram tydescs, copied fields).
4666 fn trans_obj(@crate_ctxt cx, &ast._obj ob, ast.def_id oid,
4667 &vec[ast.ty_param] ty_params, &ast.ann ann) {
4669 auto llctor_decl = cx.item_ids.get(oid);
4670 cx.item_names.insert(cx.path, llctor_decl);
4672 // Translate obj ctor args to function arguments.
4673 let vec[ast.arg] fn_args = vec();
4674 for (ast.obj_field f in ob.fields) {
4675 fn_args += vec(rec(mode=ast.alias,
4681 auto fcx = new_fn_ctxt(cx, llctor_decl);
4682 create_llargs_for_fn_args(fcx, ast.proto_fn,
4683 none[TypeRef], ret_ty_of_fn(ann),
4684 fn_args, ty_params);
4686 auto bcx = new_top_block_ctxt(fcx);
4688 let vec[ty.arg] arg_tys = arg_tys_of_fn(ann);
4689 copy_args_to_allocas(bcx, none[TypeRef], fn_args, arg_tys);
// The return slot is the (vtbl, box) pair; store the vtbl half now.
4691 auto llself_ty = type_of(cx, ret_ty_of_fn(ann));
4692 auto pair = bcx.fcx.llretptr;
4693 auto vtbl = trans_vtbl(cx, llself_ty, ob, ty_params);
4694 auto pair_vtbl = bcx.build.GEP(pair,
4696 C_int(abi.obj_field_vtbl)));
4697 auto pair_box = bcx.build.GEP(pair,
4699 C_int(abi.obj_field_box)));
4700 bcx.build.Store(vtbl, pair_vtbl);
4702 let TypeRef llbox_ty = T_opaque_obj_ptr(cx.tn);
4704 if (_vec.len[ast.ty_param](ty_params) == 0u &&
4705 _vec.len[ty.arg](arg_tys) == 0u) {
4706 // Store null into pair, if no args or typarams.
4707 bcx.build.Store(C_null(llbox_ty), pair_box);
4709 // Malloc a box for the body and copy args in.
4710 let vec[@ty.t] obj_fields = vec();
4711 for (ty.arg a in arg_tys) {
4712 append[@ty.t](obj_fields, a.ty);
4715 // Synthesize an obj body type.
4716 auto tydesc_ty = plain_ty(ty.ty_type);
4717 let vec[@ty.t] tps = vec();
4718 for (ast.ty_param tp in ty_params) {
4719 append[@ty.t](tps, tydesc_ty);
4722 let @ty.t typarams_ty = plain_ty(ty.ty_tup(tps));
4723 let @ty.t fields_ty = plain_ty(ty.ty_tup(obj_fields));
4724 let @ty.t body_ty = plain_ty(ty.ty_tup(vec(tydesc_ty,
4727 let @ty.t boxed_body_ty = plain_ty(ty.ty_box(body_ty));
4729 // Malloc a box for the body.
4730 auto box = trans_malloc_boxed(bcx, body_ty);
4732 auto rc = GEP_tup_like(bcx, boxed_body_ty, box.val,
4733 vec(0, abi.box_rc_field_refcnt));
4735 auto body = GEP_tup_like(bcx, boxed_body_ty, box.val,
4736 vec(0, abi.box_rc_field_body));
// Fresh box starts with refcount 1.
4738 bcx.build.Store(C_int(1), rc.val);
4740 // Store body tydesc.
4742 GEP_tup_like(bcx, body_ty, body.val,
4743 vec(0, abi.obj_body_elt_tydesc));
4744 bcx = body_tydesc.bcx;
4746 auto body_td = get_tydesc(bcx, body_ty);
4748 bcx.build.Store(body_td.val, body_tydesc.val);
4750 // Copy typarams into captured typarams.
4751 auto body_typarams =
4752 GEP_tup_like(bcx, body_ty, body.val,
4753 vec(0, abi.obj_body_elt_typarams));
4754 bcx = body_typarams.bcx;
4756 for (ast.ty_param tp in ty_params) {
4757 auto typaram = bcx.fcx.lltydescs.get(tp.id);
4758 auto capture = GEP_tup_like(bcx, typarams_ty, body_typarams.val,
4761 bcx = copy_ty(bcx, INIT, capture.val, typaram, tydesc_ty).bcx;
4765 // Copy args into body fields.
4767 GEP_tup_like(bcx, body_ty, body.val,
4768 vec(0, abi.obj_body_elt_fields));
4769 bcx = body_fields.bcx;
4772 for (ast.obj_field f in ob.fields) {
4773 auto arg = bcx.fcx.llargs.get(f.id);
4774 arg = load_scalar_or_boxed(bcx, arg, arg_tys.(i).ty);
4775 auto field = GEP_tup_like(bcx, fields_ty, body_fields.val,
4778 bcx = copy_ty(bcx, INIT, field.val, arg, arg_tys.(i).ty).bcx;
4781 // Store box ptr in outer pair.
4782 auto p = bcx.build.PointerCast(box.val, llbox_ty);
4783 bcx.build.Store(p, pair_box);
4785 bcx.build.RetVoid();
// Translates the constructor function for a non-nullary tag variant: the
// ctor writes the variant's discriminant into the tag's discriminant slot
// and copies each argument into the variant's blob area. Nullary variants
// need no function (they are represented as constants).
4788 fn trans_tag_variant(@crate_ctxt cx, ast.def_id tag_id,
4789 &ast.variant variant, int index,
4790 &vec[ast.ty_param] ty_params) {
4791 if (_vec.len[ast.variant_arg](variant.args) == 0u) {
4792 ret; // nullary constructors are just constants
4795 // Translate variant arguments to function arguments.
4796 let vec[ast.arg] fn_args = vec();
4798 for (ast.variant_arg varg in variant.args) {
4799 fn_args += vec(rec(mode=ast.alias,
4801 ident="arg" + _uint.to_str(i, 10u),
4805 check (cx.item_ids.contains_key(variant.id));
4806 let ValueRef llfndecl = cx.item_ids.get(variant.id);
4808 auto fcx = new_fn_ctxt(cx, llfndecl);
4809 create_llargs_for_fn_args(fcx, ast.proto_fn,
4810 none[TypeRef], ret_ty_of_fn(variant.ann),
4811 fn_args, ty_params);
4813 auto bcx = new_top_block_ctxt(fcx);
4815 auto arg_tys = arg_tys_of_fn(variant.ann);
4816 copy_args_to_allocas(bcx, none[TypeRef], fn_args, arg_tys);
4818 // Cast the tag to a type we can GEP into.
4819 auto lltagptr = bcx.build.PointerCast(fcx.llretptr,
4820 T_opaque_tag_ptr(fcx.ccx.tn));
4822 auto lldiscrimptr = bcx.build.GEP(lltagptr,
4823 vec(C_int(0), C_int(0)));
4824 bcx.build.Store(C_int(index), lldiscrimptr);
4826 auto llblobptr = bcx.build.GEP(lltagptr,
4827 vec(C_int(0), C_int(1)));
4830 for (ast.variant_arg va in variant.args) {
4831 auto rslt = GEP_tag(bcx, llblobptr, variant, i as int);
4833 auto lldestptr = rslt.val;
4835 // If this argument to this function is a tag, it'll have come in to
4836 // this function as an opaque blob due to the way that type_of()
4837 // works. So we have to cast to the destination's view of the type.
4838 auto llargptr = bcx.build.PointerCast(fcx.llargs.get(va.id),
4841 auto arg_ty = arg_tys.(i).ty;
4843 if (ty.type_is_structural(arg_ty)) {
// Structural values travel by pointer; scalars/boxes by value.
4844 llargval = llargptr;
4846 llargval = bcx.build.Load(llargptr);
4849 rslt = copy_ty(bcx, INIT, lldestptr, llargval, arg_ty);
4855 bcx = trans_block_cleanups(bcx, find_scope_cx(bcx));
4856 bcx.build.RetVoid();
4859 // FIXME: this should do some structural hash-consing to avoid
4860 // duplicate constants. I think. Maybe LLVM has a magical mode
4861 // that does so later on?
// Evaluates a constant expression to an LLVM constant. Only literal
// expressions are handled in this visible portion.
4863 fn trans_const_expr(@crate_ctxt cx, @ast.expr e) -> ValueRef {
4865 case (ast.expr_lit(?lit, ?ann)) {
4866 ret trans_lit(cx, *lit, ann);
// Emits a const item: scalars are wrapped in a private global constant
// (they come back as first-class values); other consts are stored as-is in
// the consts map keyed by def id.
4871 fn trans_const(@crate_ctxt cx, @ast.expr e,
4872 &ast.def_id cid, &ast.ann ann) {
4873 auto t = node_ann_type(cx, ann);
4874 auto v = trans_const_expr(cx, e);
4875 if (ty.type_is_scalar(t)) {
4876 // The scalars come back as 1st class LLVM vals
4877 // which we have to stick into global constants.
4878 auto g = llvm.LLVMAddGlobal(cx.llmod, val_ty(v),
4879 _str.buf(cx.names.next(cx.path)));
4880 llvm.LLVMSetInitializer(g, v);
4881 llvm.LLVMSetGlobalConstant(g, True);
4882 llvm.LLVMSetLinkage(g, lib.llvm.LLVMPrivateLinkage
4884 cx.consts.insert(cid, g);
4886 cx.consts.insert(cid, v);
// Translates one item, extending the crate-ctxt path with the item's name
// so nested symbols get fully-qualified names. Tag items translate each
// variant's constructor with its discriminant index.
4890 fn trans_item(@crate_ctxt cx, &ast.item item) {
4892 case (ast.item_fn(?name, ?f, ?tps, ?fid, ?ann)) {
4893 auto sub_cx = @rec(path=cx.path + sep() + name with *cx);
4894 trans_fn(sub_cx, f, fid, none[TypeRef], tps, ann);
4896 case (ast.item_obj(?name, ?ob, ?tps, ?oid, ?ann)) {
// Obj items also seed obj_fields so methods can find their fields.
4897 auto sub_cx = @rec(path=cx.path + sep() + name,
4899 obj_fields=ob.fields with *cx);
4900 trans_obj(sub_cx, ob, oid, tps, ann);
4902 case (ast.item_mod(?name, ?m, _)) {
4903 auto sub_cx = @rec(path=cx.path + sep() + name with *cx);
4904 trans_mod(sub_cx, m);
4906 case (ast.item_tag(?name, ?variants, ?tps, ?tag_id)) {
4907 auto sub_cx = @rec(path=cx.path + sep() + name with *cx);
4909 for (ast.variant variant in variants) {
4910 trans_tag_variant(sub_cx, tag_id, variant, i, tps);
4914 case (ast.item_const(?name, _, ?expr, ?cid, ?ann)) {
4915 auto sub_cx = @rec(path=cx.path + sep() + name with *cx);
4916 trans_const(sub_cx, expr, cid, ann);
4918 case (_) { /* fall through */ }
// Translates every item in a module, in declaration order.
4922 fn trans_mod(@crate_ctxt cx, &ast._mod m) {
4923 for (@ast.item item in m.items) {
4924 trans_item(cx, *item);
// Extracts the fn TypeRef from a (fn ptr, closure ptr) pair type.
4928 fn get_pair_fn_ty(TypeRef llpairty) -> TypeRef {
4929 // Bit of a kludge: pick the fn typeref out of the pair.
4930 let vec[TypeRef] pair_tys = vec(T_nil(), T_nil());
4931 llvm.LLVMGetStructElementTypes(llpairty,
4932 _vec.buf[TypeRef](pair_tys));
4933 ret llvm.LLVMGetElementType(pair_tys.(0));
// Declares a fastcall function plus the global constant fn-pair that
// points at it (with a null closure), named by kind and item name.
4936 fn decl_fn_and_pair(@crate_ctxt cx,
4942 auto llpairty = node_type(cx, ann);
4943 auto llfty = get_pair_fn_ty(llpairty);
4945 // Declare the function itself.
4946 let str s = cx.names.next("_rust_" + kind) + sep() + name;
4947 let ValueRef llfn = decl_fastcall_fn(cx.llmod, s, llfty);
4949 // Declare the global constant pair that points to it.
4950 let str ps = cx.names.next("_rust_" + kind + "_pair") + sep() + name;
4952 register_fn_pair(cx, ps, llpairty, llfn, id);
// Emits the private constant (fn, null-closure) pair global and records
// both the fn and the pair under the item's def id.
4955 fn register_fn_pair(@crate_ctxt cx, str ps, TypeRef llpairty, ValueRef llfn,
4957 let ValueRef gvar = llvm.LLVMAddGlobal(cx.llmod, llpairty,
4959 auto pair = C_struct(vec(llfn,
4960 C_null(T_opaque_closure_ptr(cx.tn))));
4962 llvm.LLVMSetInitializer(gvar, pair);
4963 llvm.LLVMSetGlobalConstant(gvar, True);
4964 llvm.LLVMSetLinkage(gvar,
4965 lib.llvm.LLVMPrivateLinkage
4968 cx.item_ids.insert(id, llfn);
4969 cx.fn_pairs.insert(id, gvar);
// Computes the LLVM type of the Rust-side wrapper for a native fn.
4972 fn native_fn_wrapper_type(@crate_ctxt cx, &ast.ann ann) -> TypeRef {
4973 auto x = node_ann_type(cx, ann);
4975 case (ty.ty_native_fn(?abi, ?args, ?out)) {
4976 ret type_of_fn(cx, ast.proto_fn, args, out);
// Declares a native function: a fastcall wrapper (plus its fn-pair global)
// that forwards to the actual cdecl-declared native symbol. The wrapper
// marshals the fastcall params into call_args — rust-abi natives also get
// the task pointer and tydesc params forwarded — then calls the native fn,
// stores the result through retptr, and returns void.
4982 fn decl_native_fn_and_pair(@crate_ctxt cx,
4986 // Declare the wrapper.
4987 auto wrapper_type = native_fn_wrapper_type(cx, ann);
4988 let str s = cx.names.next("_rust_wrapper") + sep() + name;
4989 let ValueRef wrapper_fn = decl_fastcall_fn(cx.llmod, s, wrapper_type);
4991 // Declare the global constant pair that points to it.
4992 auto wrapper_pair_type = T_fn_pair(cx.tn, wrapper_type);
4993 let str ps = cx.names.next("_rust_wrapper_pair") + sep() + name;
4995 register_fn_pair(cx, ps, wrapper_pair_type, wrapper_fn, id);
4997 // Declare the function itself.
4998 auto llfty = get_pair_fn_ty(node_type(cx, ann));
4999 auto function = decl_cdecl_fn(cx.llmod, name, llfty);
5001 // Build the wrapper.
5002 auto fcx = new_fn_ctxt(cx, wrapper_fn);
5003 auto bcx = new_top_block_ctxt(fcx);
5004 auto fn_type = node_ann_type(cx, ann);
5006 let vec[ValueRef] call_args = vec();
5007 auto abi = ty.ty_fn_abi(fn_type);
5010 case (ast.native_abi_rust) {
5011 call_args += vec(fcx.lltaskptr);
5012 auto num_ty_param = ty.count_ty_params(plain_ty(fn_type.struct));
5013 for each (uint i in _uint.range(0u, num_ty_param)) {
5014 auto llarg = llvm.LLVMGetParam(fcx.llfn, arg_n);
5015 check (llarg as int != 0);
5016 call_args += vec(llarg);
5020 case (ast.native_abi_cdecl) {
5023 auto args = ty.ty_fn_args(fn_type);
5024 for (ty.arg arg in args) {
5025 auto llarg = llvm.LLVMGetParam(fcx.llfn, arg_n);
5026 check (llarg as int != 0);
5027 call_args += vec(llarg);
5030 auto r = bcx.build.Call(function, call_args);
5031 bcx.build.Store(r, fcx.llretptr);
5032 bcx.build.RetVoid();
// Fold callback: records a native item and declares its wrapper+pair
// unless it is already registered as an obj method.
5035 fn collect_native_item(&@crate_ctxt cx, @ast.native_item i) -> @crate_ctxt {
5037 case (ast.native_item_fn(?name, _, _, ?fid, ?ann)) {
5038 cx.native_items.insert(fid, i);
5039 if (! cx.obj_methods.contains_key(fid)) {
5040 decl_native_fn_and_pair(cx, name, ann, fid);
5043 case (_) { /* fall through */ }
// Fold callback: records each item in cx.items and pre-declares the LLVM
// functions/pairs that later translation passes will fill in. Obj items
// also mark their methods so collect_native_item/collect_item skip them.
5048 fn collect_item(&@crate_ctxt cx, @ast.item i) -> @crate_ctxt {
5051 case (ast.item_fn(?name, ?f, _, ?fid, ?ann)) {
5052 cx.items.insert(fid, i);
5053 if (! cx.obj_methods.contains_key(fid)) {
5054 decl_fn_and_pair(cx, "fn", name, ann, fid);
5058 case (ast.item_obj(?name, ?ob, _, ?oid, ?ann)) {
5059 cx.items.insert(oid, i);
5060 decl_fn_and_pair(cx, "obj_ctor", name, ann, oid);
5061 for (@ast.method m in ob.methods) {
5062 cx.obj_methods.insert(m.node.id, ());
5066 case (ast.item_const(?name, _, _, ?cid, _)) {
5067 cx.items.insert(cid, i);
5070 case (ast.item_mod(?name, ?m, ?mid)) {
5071 cx.items.insert(mid, i);
5074 case (ast.item_tag(_, ?variants, ?tps, ?tag_id)) {
5075 cx.items.insert(tag_id, i);
5078 case (_) { /* fall through */ }
// Drives the item-collection pass over the whole crate via an identity
// fold with the two collect callbacks installed.
5084 fn collect_items(@crate_ctxt cx, @ast.crate crate) {
5086 let fold.ast_fold[@crate_ctxt] fld =
5087 fold.new_identity_fold[@crate_ctxt]();
5089 fld = @rec( update_env_for_item = bind collect_item(_,_),
5090 update_env_for_native_item = bind collect_native_item(_,_)
5093 fold.fold_crate[@crate_ctxt](cx, fld, crate);
// Fold callback: declares ctor fns for every non-nullary tag variant.
5096 fn collect_tag_ctor(&@crate_ctxt cx, @ast.item i) -> @crate_ctxt {
5100 case (ast.item_tag(_, ?variants, _, _)) {
5101 for (ast.variant variant in variants) {
5102 if (_vec.len[ast.variant_arg](variant.args) != 0u) {
5103 decl_fn_and_pair(cx, "tag", variant.name,
5104 variant.ann, variant.id);
5109 case (_) { /* fall through */ }
// Drives the tag-ctor collection pass over the crate.
5114 fn collect_tag_ctors(@crate_ctxt cx, @ast.crate crate) {
5116 let fold.ast_fold[@crate_ctxt] fld =
5117 fold.new_identity_fold[@crate_ctxt]();
5119 fld = @rec( update_env_for_item = bind collect_tag_ctor(_,_)
5122 fold.fold_crate[@crate_ctxt](cx, fld, crate);
5126 // The constant translation pass.
// Fold callback: for tag items, emits one private global per variant
// holding its discriminant value and records it in cx.discrims; for const
// items, translates the value and records it in cx.item_ids.
5128 fn trans_constant(&@crate_ctxt cx, @ast.item it) -> @crate_ctxt {
5130 case (ast.item_tag(_, ?variants, _, ?tag_id)) {
5132 auto n_variants = _vec.len[ast.variant](variants);
5133 while (i < n_variants) {
5134 auto variant = variants.(i);
5136 auto discrim_val = C_int(i as int);
5138 // FIXME: better name.
5139 auto discrim_gvar = llvm.LLVMAddGlobal(cx.llmod, T_int(),
5140 _str.buf("tag_discrim"));
5142 // FIXME: Eventually we do want to export these, but we need
5143 // to figure out what name they get first!
5144 llvm.LLVMSetInitializer(discrim_gvar, discrim_val);
5145 llvm.LLVMSetGlobalConstant(discrim_gvar, True);
5146 llvm.LLVMSetLinkage(discrim_gvar, lib.llvm.LLVMPrivateLinkage
5149 cx.discrims.insert(variant.id, discrim_gvar);
5155 case (ast.item_const(?name, _, ?expr, ?cid, ?ann)) {
5156 // FIXME: The whole expr-translation system needs cloning to deal
5159 cx.item_ids.insert(cid, v);
// Drives the constant-translation pass over the crate.
5170 fn trans_constants(@crate_ctxt cx, @ast.crate crate) {
5171 let fold.ast_fold[@crate_ctxt] fld =
5172 fold.new_identity_fold[@crate_ctxt]();
5174 fld = @rec(update_env_for_item = bind trans_constant(_,_) with *fld);
5176 fold.fold_crate[@crate_ctxt](cx, fld, crate);
// Runtime pointer -> int cast (emits a PtrToInt instruction).
5180 fn vp2i(@block_ctxt cx, ValueRef v) -> ValueRef {
5181 ret cx.build.PtrToInt(v, T_int());
// Runtime int -> pointer cast (emits an IntToPtr instruction).
5185 fn vi2p(@block_ctxt cx, ValueRef v, TypeRef t) -> ValueRef {
5186 ret cx.build.IntToPtr(v, t);
// Constant-expression pointer -> int cast.
5189 fn p2i(ValueRef v) -> ValueRef {
5190 ret llvm.LLVMConstPtrToInt(v, T_int());
// Constant-expression int -> pointer cast.
5193 fn i2p(ValueRef v, TypeRef t) -> ValueRef {
5194 ret llvm.LLVMConstIntToPtr(v, t);
// Fills in the body of the pre-declared exit-task glue: a hand-built
// fn_ctxt (task ptr is LLVM param 3; env and retptr are null placeholders)
// whose sole job is to call upcall_exit and return.
5197 fn trans_exit_task_glue(@crate_ctxt cx) {
5198 let vec[TypeRef] T_args = vec();
5199 let vec[ValueRef] V_args = vec();
5201 auto llfn = cx.glues.exit_task_glue;
5202 let ValueRef lltaskptr = llvm.LLVMGetParam(llfn, 3u);
5203 auto fcx = @rec(llfn=llfn,
5204 lltaskptr=lltaskptr,
5205 llenv=C_null(T_opaque_closure_ptr(cx.tn)),
5206 llretptr=C_null(T_ptr(T_nil())),
5207 mutable llself=none[ValueRef],
5208 mutable lliterbody=none[ValueRef],
5209 llargs=new_def_hash[ValueRef](),
5210 llobjfields=new_def_hash[ValueRef](),
5211 lllocals=new_def_hash[ValueRef](),
5212 lltydescs=new_def_hash[ValueRef](),
5215 auto bcx = new_top_block_ctxt(fcx);
5216 trans_upcall(bcx, "upcall_exit", V_args);
5217 bcx.build.RetVoid();
// Registers named LLVM types for the runtime's crate/task/tydesc records
// so they print readably in emitted IR.
5220 fn create_typedefs(@crate_ctxt cx) {
5221 llvm.LLVMAddTypeName(cx.llmod, _str.buf("crate"), T_crate(cx.tn));
5222 llvm.LLVMAddTypeName(cx.llmod, _str.buf("task"), T_task(cx.tn));
5223 llvm.LLVMAddTypeName(cx.llmod, _str.buf("tydesc"), T_tydesc(cx.tn));
// Initializes the crate record global the runtime reads at startup: glue
// entry points are stored as offsets relative to the crate record's own
// address (ConstSub of the two constant addresses), with unused slots null.
5226 fn create_crate_constant(@crate_ctxt cx) {
5228 let ValueRef crate_addr = p2i(cx.crate_ptr);
5230 let ValueRef activate_glue_off =
5231 llvm.LLVMConstSub(p2i(cx.glues.activate_glue), crate_addr);
5233 let ValueRef yield_glue_off =
5234 llvm.LLVMConstSub(p2i(cx.glues.yield_glue), crate_addr);
5236 let ValueRef exit_task_glue_off =
5237 llvm.LLVMConstSub(p2i(cx.glues.exit_task_glue), crate_addr);
5239 let ValueRef crate_val =
5240 C_struct(vec(C_null(T_int()), // ptrdiff_t image_base_off
5241 p2i(cx.crate_ptr), // uintptr_t self_addr
5242 C_null(T_int()), // ptrdiff_t debug_abbrev_off
5243 C_null(T_int()), // size_t debug_abbrev_sz
5244 C_null(T_int()), // ptrdiff_t debug_info_off
5245 C_null(T_int()), // size_t debug_info_sz
5246 activate_glue_off, // size_t activate_glue_off
5247 yield_glue_off, // size_t yield_glue_off
5248 C_null(T_int()), // size_t unwind_glue_off
5249 C_null(T_int()), // size_t gc_glue_off
5250 exit_task_glue_off, // size_t main_exit_task_glue_off
5251 C_null(T_int()), // int n_rust_syms
5252 C_null(T_int()), // int n_c_syms
5253 C_null(T_int()), // int n_libs
5254 C_int(abi.abi_x86_rustc_fastcall) // uintptr_t abi_tag
5257 llvm.LLVMSetInitializer(cx.crate_ptr, crate_val);
// Search the crate's translated item symbols for the program entry
// point: any item whose name ends with <path-sep> + "main".  Reports a
// session error if no candidate exists, and a separate error if the
// suffix matches more than one item.
5260 fn find_main_fn(@crate_ctxt cx) -> ValueRef {
5261 auto e = sep() + "main";
5262 let ValueRef v = C_nil();
5264 for each (tup(str,ValueRef) i in cx.item_names.items()) {
5265 if (_str.ends_with(i._0, e)) {
// No item name matched: the crate defines no main function.
5272 cx.sess.err("main fn not found");
// More than one item name matched the "...main" suffix.
5278 cx.sess.err("multiple main fns found");
// Emit the process entry point: a cdecl `main` (named "WinMain@16" on
// win32) that forwards the translated rust main fn, the crate record,
// argc and argv to the runtime's `rust_start`, returning its result.
5284 fn trans_main_fn(@crate_ctxt cx, ValueRef llcrate) {
// C-level signatures: main(argc, argv) and
// rust_start(main_fn, crate, argc, argv), all as plain ints.
5285 auto T_main_args = vec(T_int(), T_int());
5286 auto T_rust_start_args = vec(T_int(), T_int(), T_int(), T_int());
// On win32 the entry symbol is the decorated "WinMain@16".
5289 if (_str.eq(std.os.target_os(), "win32")) {
5290 main_name = "WinMain@16";
5296 decl_cdecl_fn(cx.llmod, main_name, T_fn(T_main_args, T_int()));
5298 auto llrust_start = decl_cdecl_fn(cx.llmod, "rust_start",
5299 T_fn(T_rust_start_args, T_int()));
5301 auto llargc = llvm.LLVMGetParam(llmain, 0u);
5302 auto llargv = llvm.LLVMGetParam(llmain, 1u);
5303 auto llrust_main = find_main_fn(cx);
5306 // Emit the moral equivalent of:
5308 // main(int argc, char **argv) {
5309 // rust_start(&_rust.main, &crate, argc, argv);
5313 let BasicBlockRef llbb =
5314 llvm.LLVMAppendBasicBlock(llmain, _str.buf(""));
5315 auto b = new_builder(llbb);
// Pointers are passed to rust_start as integers (p2i).
5317 auto start_args = vec(p2i(llrust_main), p2i(llcrate), llargc, llargv);
5319 b.Ret(b.Call(llrust_start, start_args));
// Declare the LLVM intrinsics generated code calls directly (currently
// only llvm.trap) and collect them into a by-name map for crate_ctxt.
5322 fn declare_intrinsics(ModuleRef llmod) -> hashmap[str,ValueRef] {
// llvm.trap takes no arguments and returns void.
5324 let vec[TypeRef] T_trap_args = vec();
5325 auto trap = decl_cdecl_fn(llmod, "llvm.trap",
5326 T_fn(T_trap_args, T_void()));
5328 auto intrinsics = new_str_hash[ValueRef]();
5329 intrinsics.insert("llvm.trap", trap);
// Debug aid: emit an upcall that traces the constant string `s` at
// runtime.  The string is interned as a C string in the crate context.
5334 fn trace_str(@block_ctxt cx, str s) {
5335 trans_upcall(cx, "upcall_trace_str", vec(p2i(C_cstr(cx.fcx.ccx, s))));
// Debug aid: emit an upcall that traces the word-sized value `v`.
5338 fn trace_word(@block_ctxt cx, ValueRef v) {
5339 trans_upcall(cx, "upcall_trace_word", vec(v));
// Debug aid: trace a pointer by casting it to an int and reusing
// trace_word.
5342 fn trace_ptr(@block_ctxt cx, ValueRef v) {
5343 trace_word(cx, cx.build.PtrToInt(v, T_int()));
// Emit a call to the llvm.trap intrinsic (declared in
// declare_intrinsics): a hard abort in the current block.
5346 fn trap(@block_ctxt bcx) {
// llvm.trap takes no arguments; pass an empty vec.
5347 let vec[ValueRef] v = vec();
5348 bcx.build.Call(bcx.fcx.ccx.intrinsics.get("llvm.trap"), v);
// Run the LLVM verifier pass over the finished module to catch
// malformed IR before bitcode is written out.
5351 fn check_module(ModuleRef llmod) {
5352 auto pm = mk_pass_manager();
5353 llvm.LLVMAddVerifierPass(pm.llpm);
5354 llvm.LLVMRunPassManager(pm.llpm, llmod);
5356 // TODO: run the linter here also, once there are llvm-c bindings for it.
// Emit the no-op type glue: a fastcall fn (taskptr, i8*) -> void whose
// single basic block simply returns.  Stored in glue_fns (see
// make_glues) under no_op_type_glue.
5359 fn make_no_op_type_glue(ModuleRef llmod, type_names tn) -> ValueRef {
5360 auto ty = T_fn(vec(T_taskptr(tn), T_ptr(T_i8())), T_void());
5361 auto fun = decl_fastcall_fn(llmod, abi.no_op_type_glue_name(), ty);
5362 auto bb_name = _str.buf("_rust_no_op_type_glue_bb");
5363 auto llbb = llvm.LLVMAppendBasicBlock(fun, bb_name);
5364 new_builder(llbb).RetVoid();
// Emit the crate's byte-copy glue: a fastcall fn
// (dst: i8*, src: i8*, count: int) -> void implemented as an explicit
// init / loop-header / loop-body / end CFG copying one byte per
// iteration.
5368 fn make_memcpy_glue(ModuleRef llmod) -> ValueRef {
5370 // We're not using the LLVM memcpy intrinsic. It appears to call through
5371 // to the platform memcpy in some cases, which is not terribly safe to run
5374 auto p8 = T_ptr(T_i8());
5376 auto ty = T_fn(vec(p8, p8, T_int()), T_void());
5377 auto fun = decl_fastcall_fn(llmod, abi.memcpy_glue_name(), ty);
5379 auto initbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("init"));
5380 auto hdrbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("hdr"));
5381 auto loopbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("loop"));
5382 auto endbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("end"));
5384 auto dst = llvm.LLVMGetParam(fun, 0u);
5385 auto src = llvm.LLVMGetParam(fun, 1u);
5386 auto count = llvm.LLVMGetParam(fun, 2u);
// Init block: stack-allocate the loop counter and zero it.
5389 auto ib = new_builder(initbb);
5390 auto ip = ib.Alloca(T_int());
5391 ib.Store(C_int(0), ip);
5394 // Loop-header block
// Exit to endbb once i == count, otherwise run the body.
5395 auto hb = new_builder(hdrbb);
5396 auto i = hb.Load(ip);
5397 hb.CondBr(hb.ICmp(lib.llvm.LLVMIntEQ, count, i), endbb, loopbb);
// Loop body: copy byte i from src to dst, then increment the counter.
5400 auto lb = new_builder(loopbb);
5402 lb.Store(lb.Load(lb.GEP(src, vec(i))),
5403 lb.GEP(dst, vec(i)));
5404 lb.Store(lb.Add(i, C_int(1)), ip);
// End block.
5408 auto eb = new_builder(endbb);
// Emit the crate's zero-fill glue: a fastcall fn
// (dst: i8*, count: int) -> void, mirroring make_memcpy_glue's
// init / header / body / end loop but storing a zero byte instead of
// copying from a source.
5413 fn make_bzero_glue(ModuleRef llmod) -> ValueRef {
5415 // We're not using the LLVM memset intrinsic. Same as with memcpy.
5417 auto p8 = T_ptr(T_i8());
5419 auto ty = T_fn(vec(p8, T_int()), T_void());
5420 auto fun = decl_fastcall_fn(llmod, abi.bzero_glue_name(), ty);
5422 auto initbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("init"));
5423 auto hdrbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("hdr"));
5424 auto loopbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("loop"));
5425 auto endbb = llvm.LLVMAppendBasicBlock(fun, _str.buf("end"));
5427 auto dst = llvm.LLVMGetParam(fun, 0u);
5428 auto count = llvm.LLVMGetParam(fun, 1u);
// Init block: stack-allocate the loop counter and zero it.
5431 auto ib = new_builder(initbb);
5432 auto ip = ib.Alloca(T_int());
5433 ib.Store(C_int(0), ip);
5436 // Loop-header block
// Exit to endbb once i == count, otherwise run the body.
5437 auto hb = new_builder(hdrbb);
5438 auto i = hb.Load(ip);
5439 hb.CondBr(hb.ICmp(lib.llvm.LLVMIntEQ, count, i), endbb, loopbb);
// Loop body: store a zero byte at dst[i], then increment the counter.
5442 auto lb = new_builder(loopbb);
5444 lb.Store(C_integral(0, T_i8()), lb.GEP(dst, vec(i)));
5445 lb.Store(lb.Add(i, C_int(1)), ip);
// End block.
5449 auto eb = new_builder(endbb);
// Declare (but do not define) the vec-append glue; its body is filled
// in later by trans_vec_append_glue.  The argument list is documented
// in the comment block below.
5454 fn make_vec_append_glue(ModuleRef llmod, type_names tn) -> ValueRef {
5456 * Args to vec_append_glue:
5458 * 0. (Implicit) task ptr
5460 * 1. Pointer to the tydesc of the vec, so that we can tell if it's gc
5461 * mem, and have a tydesc to pass to malloc if we're allocating anew.
5463 * 2. Pointer to the tydesc of the vec's stored element type, so that
5464 * elements can be copied to a newly alloc'ed vec if one must be
5467 * 3. Dst vec ptr (i.e. ptr to ptr to rust_vec).
5469 * 4. Src vec (i.e. ptr to rust_vec).
5471 * 5. Flag indicating whether to skip trailing null on dst.
5475 auto ty = T_fn(vec(T_taskptr(tn),
5476 T_ptr(T_tydesc(tn)),
5477 T_ptr(T_tydesc(tn)),
5478 T_ptr(T_opaque_vec_ptr()),
5479 T_opaque_vec_ptr(), T_bool()),
5482 auto llfn = decl_fastcall_fn(llmod, abi.vec_append_glue_name(), ty);
// Fill in the body of the vec-append glue declared by
// make_vec_append_glue.  Strategy: ask the runtime (upcall_vec_grow)
// to grow dst so it can hold src's bytes, then copy dst elements (when
// the runtime indicates a fresh vec was produced) and src elements
// into the result, taking refs through the element tydesc's take glue,
// and finally store the resulting vec back through the dst alias.
5486 fn trans_vec_append_glue(@crate_ctxt cx) {
5488 auto llfn = cx.glues.vec_append_glue;
// Unpack the six parameters documented in make_vec_append_glue.
5490 let ValueRef lltaskptr = llvm.LLVMGetParam(llfn, 0u);
5491 let ValueRef llvec_tydesc = llvm.LLVMGetParam(llfn, 1u);
5492 let ValueRef llelt_tydesc = llvm.LLVMGetParam(llfn, 2u);
5493 let ValueRef lldst_vec_ptr = llvm.LLVMGetParam(llfn, 3u);
5494 let ValueRef llsrc_vec = llvm.LLVMGetParam(llfn, 4u);
5495 let ValueRef llskipnull = llvm.LLVMGetParam(llfn, 5u);
// Minimal fn context for the glue body; env and retptr are null
// placeholders since the glue has neither a closure nor a return slot.
5497 auto fcx = @rec(llfn=llfn,
5498 lltaskptr=lltaskptr,
5499 llenv=C_null(T_ptr(T_nil())),
5500 llretptr=C_null(T_ptr(T_nil())),
5501 mutable llself=none[ValueRef],
5502 mutable lliterbody=none[ValueRef],
5503 llargs=new_def_hash[ValueRef](),
5504 llobjfields=new_def_hash[ValueRef](),
5505 lllocals=new_def_hash[ValueRef](),
5506 lltydescs=new_def_hash[ValueRef](),
5509 auto bcx = new_top_block_ctxt(fcx);
5511 auto lldst_vec = bcx.build.Load(lldst_vec_ptr);
5513 // First the dst vec needs to grow to accommodate the src vec.
5514 // To do this we have to figure out how many bytes to add.
// Load a vec's fill (bytes-in-use) field.
5516 fn vec_fill(@block_ctxt bcx, ValueRef v) -> ValueRef {
5517 ret bcx.build.Load(bcx.build.GEP(v, vec(C_int(0),
5518 C_int(abi.vec_elt_fill))));
// Store a new value into a vec's fill field.
5521 fn put_vec_fill(@block_ctxt bcx, ValueRef v, ValueRef fill) -> ValueRef {
5522 ret bcx.build.Store(fill,
5525 C_int(abi.vec_elt_fill))));
// Fill of v, minus one byte when skipnull is set (drops the trailing
// NUL of a str-like vec).
5528 fn vec_fill_adjusted(@block_ctxt bcx, ValueRef v,
5529 ValueRef skipnull) -> ValueRef {
5530 auto f = bcx.build.Load(bcx.build.GEP(v,
5532 C_int(abi.vec_elt_fill))));
5533 ret bcx.build.Select(skipnull, bcx.build.Sub(f, C_int(1)), f);
// Pointer to the first data byte of a vec, cast to i8*.
5536 fn vec_p0(@block_ctxt bcx, ValueRef v) -> ValueRef {
5537 auto p = bcx.build.GEP(v, vec(C_int(0),
5538 C_int(abi.vec_elt_data)));
5539 ret bcx.build.PointerCast(p, T_ptr(T_i8()));
// Pointer one past the last filled byte of a vec.
5543 fn vec_p1(@block_ctxt bcx, ValueRef v) -> ValueRef {
5544 auto len = vec_fill(bcx, v);
5545 ret bcx.build.GEP(vec_p0(bcx, v), vec(len));
// As vec_p1, but using the skipnull-adjusted fill.
5548 fn vec_p1_adjusted(@block_ctxt bcx, ValueRef v,
5549 ValueRef skipnull) -> ValueRef {
5550 auto len = vec_fill_adjusted(bcx, v, skipnull);
5551 ret bcx.build.GEP(vec_p0(bcx, v), vec(len));
// Grow dst to also hold src's (adjusted) fill.
// NOTE(review): llcopy_dst_ptr appears to be an out-flag from the
// upcall saying whether existing dst elements must be copied into a
// freshly allocated vec -- confirm against upcall_vec_grow in the
// runtime.
5555 auto llcopy_dst_ptr = bcx.build.Alloca(T_int());
5556 auto llnew_vec_res =
5557 trans_upcall(bcx, "upcall_vec_grow",
5558 vec(vp2i(bcx, lldst_vec),
5559 vec_fill_adjusted(bcx, llsrc_vec, llskipnull),
5560 vp2i(bcx, llcopy_dst_ptr),
5561 vp2i(bcx, llvec_tydesc)));
5563 bcx = llnew_vec_res.bcx;
5564 auto llnew_vec = vi2p(bcx, llnew_vec_res.val,
5565 T_opaque_vec_ptr());
// Start the result empty; the copy blocks below bump the fill.
5567 put_vec_fill(bcx, llnew_vec, C_int(0));
5569 auto copy_dst_cx = new_sub_block_ctxt(bcx, "copy new <- dst");
5570 auto copy_src_cx = new_sub_block_ctxt(bcx, "copy new <- src");
// pp0 tracks the current write cursor into the new vec's data.
5572 auto pp0 = bcx.build.Alloca(T_ptr(T_i8()));
5573 bcx.build.Store(vec_p0(bcx, llnew_vec), pp0);
// Branch on the loaded llcopy_dst_ptr flag.
5575 bcx.build.CondBr(bcx.build.TruncOrBitCast
5576 (bcx.build.Load(llcopy_dst_ptr),
// Copy n_bytes of elements from src into dst: first run the element
// tydesc's take glue over each source element, then memcpy the bytes.
5582 fn copy_elts(@block_ctxt cx,
5583 ValueRef elt_tydesc,
5586 ValueRef n_bytes) -> result {
5588 auto src_lim = cx.build.GEP(src, vec(n_bytes));
5591 cx.build.Load(cx.build.GEP(elt_tydesc,
5593 C_int(abi.tydesc_field_size))));
// Invoke the take glue on a single element.
5595 fn take_one(ValueRef elt_tydesc,
5596 @block_ctxt cx, ValueRef v) -> result {
5597 call_tydesc_glue_full(cx, v,
5599 abi.tydesc_field_take_glue_off);
5603 auto bcx = iter_sequence_raw(cx, src, src_lim,
5604 elt_llsz, bind take_one(elt_tydesc,
5607 ret call_memcpy(bcx, dst, src, n_bytes);
5610 // Copy any dst elements in, omitting null if doing str.
5611 auto n_bytes = vec_fill_adjusted(copy_dst_cx, lldst_vec, llskipnull);
5612 copy_dst_cx = copy_elts(copy_dst_cx,
5614 copy_dst_cx.build.Load(pp0),
5615 vec_p0(copy_dst_cx, lldst_vec),
// Record the bytes written and advance the write cursor.
5618 put_vec_fill(copy_dst_cx, llnew_vec, n_bytes);
5619 copy_dst_cx.build.Store(vec_p1(copy_dst_cx, llnew_vec), pp0);
5620 copy_dst_cx.build.Br(copy_src_cx.llbb);
5623 // Copy any src elements in, carrying along null if doing str.
5624 n_bytes = vec_fill(copy_src_cx, llsrc_vec);
5625 copy_src_cx = copy_elts(copy_src_cx,
5627 copy_src_cx.build.Load(pp0),
5628 vec_p0(copy_src_cx, llsrc_vec),
// Final fill is the running fill plus the src bytes just copied.
5631 put_vec_fill(copy_src_cx, llnew_vec,
5632 copy_src_cx.build.Add(vec_fill(copy_src_cx,
5636 // Write new_vec back through the alias we were given.
5637 copy_src_cx.build.Store(llnew_vec, lldst_vec_ptr);
5638 copy_src_cx.build.RetVoid();
// Construct the per-crate glue_fns record: declarations for the
// runtime-facing activate / yield / exit-task glues and the upcall
// shims, plus the fully-emitted no-op type, memcpy, bzero and
// vec-append glues.
5642 fn make_glues(ModuleRef llmod, type_names tn) -> @glue_fns {
5643 ret @rec(activate_glue = decl_glue(llmod, tn, abi.activate_glue_name()),
5644 yield_glue = decl_glue(llmod, tn, abi.yield_glue_name()),
5646 * Note: the signature passed to decl_cdecl_fn here looks unusual
5647 * because it is. It corresponds neither to an upcall signature
5648 * nor a normal rust-ABI signature. In fact it is a fake
5649 * signature, that exists solely to acquire the task pointer as
5650 * an argument to the upcall. It so happens that the runtime sets
5651 * up the task pointer as the sole incoming argument to the frame
5652 * that we return into when returning to the exit task glue. So
5653 * this is the signature required to retrieve it.
5655 exit_task_glue = decl_cdecl_fn(llmod, abi.exit_task_glue_name(),
5663 _vec.init_fn[ValueRef](bind decl_upcall_glue(llmod, tn, _),
5664 abi.n_upcall_glues as uint),
5665 no_op_type_glue = make_no_op_type_glue(llmod, tn),
5666 memcpy_glue = make_memcpy_glue(llmod),
5667 bzero_glue = make_bzero_glue(llmod),
5668 vec_append_glue = make_vec_append_glue(llmod, tn));
// Top-level translation entry point: create the LLVM module for the
// crate, configure the (x86-only) target, declare the rust_crate
// global, intrinsics and glues, assemble the crate_ctxt, translate
// every item, emit the remaining glue bodies, the crate constant and
// the C main, then verify the module and write bitcode to `output`.
5671 fn trans_crate(session.session sess, @ast.crate crate, str output,
5674 llvm.LLVMModuleCreateWithNameInContext(_str.buf("rust_out"),
5675 llvm.LLVMGetGlobalContext());
// Target configuration is hard-wired to x86 here.
5677 llvm.LLVMSetDataLayout(llmod, _str.buf(x86.get_data_layout()));
5678 llvm.LLVMSetTarget(llmod, _str.buf(x86.get_target_triple()));
5679 auto td = mk_target_data(x86.get_data_layout());
5680 auto tn = mk_type_names();
// The crate record global; its initializer is filled in later by
// create_crate_constant.
5681 let ValueRef crate_ptr =
5682 llvm.LLVMAddGlobal(llmod, T_crate(tn), _str.buf("rust_crate"));
5684 llvm.LLVMSetModuleInlineAsm(llmod, _str.buf(x86.get_module_asm()));
5686 auto intrinsics = declare_intrinsics(llmod);
5688 auto glues = make_glues(llmod, tn);
// Hash/equality functions for the @ty.t-keyed maps below.
5689 auto hasher = ty.hash_ty;
5690 auto eqer = ty.eq_ty;
5691 auto tag_sizes = map.mk_hashmap[@ty.t,uint](hasher, eqer);
5692 auto tydescs = map.mk_hashmap[@ty.t,@tydesc_info](hasher, eqer);
5693 let vec[ast.ty_param] obj_typarams = vec();
5694 let vec[ast.obj_field] obj_fields = vec();
// Assemble the crate context threaded through all of translation.
5696 auto cx = @rec(sess = sess,
5700 crate_ptr = crate_ptr,
5701 upcalls = new_str_hash[ValueRef](),
5702 intrinsics = intrinsics,
5703 item_names = new_str_hash[ValueRef](),
5704 item_ids = new_def_hash[ValueRef](),
5705 items = new_def_hash[@ast.item](),
5706 native_items = new_def_hash[@ast.native_item](),
5707 tag_sizes = tag_sizes,
5708 discrims = new_def_hash[ValueRef](),
5709 fn_pairs = new_def_hash[ValueRef](),
5710 consts = new_def_hash[ValueRef](),
5711 obj_methods = new_def_hash[()](),
5713 obj_typarams = obj_typarams,
5714 obj_fields = obj_fields,
// Emit named types, collect and translate the crate's contents, then
// finish the glue bodies, crate record and entry point.
5719 create_typedefs(cx);
5721 collect_items(cx, crate);
5722 collect_tag_ctors(cx, crate);
5723 trans_constants(cx, crate);
5725 trans_mod(cx, crate.node.module);
5726 trans_exit_task_glue(cx);
5727 trans_vec_append_glue(cx);
5728 create_crate_constant(cx);
5730 trans_main_fn(cx, cx.crate_ptr);
// Verify the finished module, then write bitcode and release it.
5733 check_module(llmod);
5735 llvm.LLVMWriteBitcodeToFile(llmod, _str.buf(output));
5736 llvm.LLVMDisposeModule(llmod);
5743 // indent-tabs-mode: nil
5744 // c-basic-offset: 4
5745 // buffer-file-coding-system: utf-8-unix
5746 // compile-command: "make -k -C ../.. 2>&1 | sed -e 's/\\/x\\//x:\\//g'";