- Add the LLVM intrinsics used to manipulate a va_list.
- Add the va_list lang item in order to allow implementing
VaList in libcore.
#![stable(feature = "", since = "1.30.0")]
#![allow(non_camel_case_types)]
+#![cfg_attr(stage0, allow(dead_code))]
//! Utilities related to FFI bindings.
f.pad("c_void")
}
}
+
+/// Basic implementation of a `va_list`.
+// Fallback for targets whose `va_list` is pointer-like (everything except
+// non-Windows AArch64/PowerPC/x86_64, which have register-save-area ABIs
+// declared below). The extern opaque type leaves the layout unknown to
+// Rust; values of this type are only ever handled through a reference.
+#[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"),
+ not(target_arch = "x86_64")),
+ windows))]
+#[unstable(feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "27745")]
+extern {
+ type VaListImpl;
+}
+
+// Debug for the opaque fallback: the type has no known fields, so all we
+// can usefully show is its address.
+#[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"),
+ not(target_arch = "x86_64")),
+ windows))]
+impl fmt::Debug for VaListImpl {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ // Renders as `va_list* 0x...` via the pointer formatter.
+ write!(f, "va_list* {:p}", self)
+ }
+}
+
+/// AArch64 ABI implementation of a `va_list`. See the
+/// [AArch64 Procedure Call Standard] for more details.
+///
+/// [AArch64 Procedure Call Standard]:
+/// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf
+#[cfg(all(target_arch = "aarch64", not(windows)))]
+#[repr(C)]
+#[derive(Debug)]
+#[unstable(feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "27745")]
+struct VaListImpl {
+ // Field order mirrors the C `va_list` defined by the AAPCS64 (see the
+ // link above): next stack argument, tops of the general-register and
+ // FP/SIMD-register save areas, and byte offsets into each save area.
+ stack: *mut (),
+ gr_top: *mut (),
+ vr_top: *mut (),
+ gr_offs: i32,
+ vr_offs: i32,
+}
+
+/// PowerPC ABI implementation of a `va_list`.
+#[cfg(all(target_arch = "powerpc", not(windows)))]
+#[repr(C)]
+#[derive(Debug)]
+#[unstable(feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "27745")]
+struct VaListImpl {
+ // Mirrors the SysV PowerPC `va_list` struct: counters for GP and FP
+ // registers consumed so far, padding, then pointers to the overflow
+ // (stack) argument area and the register save area.
+ gpr: u8,
+ fpr: u8,
+ reserved: u16,
+ overflow_arg_area: *mut (),
+ reg_save_area: *mut (),
+}
+
+/// x86_64 ABI implementation of a `va_list`.
+#[cfg(all(target_arch = "x86_64", not(windows)))]
+#[repr(C)]
+#[derive(Debug)]
+#[unstable(feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "27745")]
+struct VaListImpl {
+ // Matches the System V AMD64 ABI `va_list`: byte offsets into the
+ // register save area for GP and FP registers, then pointers to the
+ // stack overflow area and the register save area itself.
+ gp_offset: i32,
+ fp_offset: i32,
+ overflow_arg_area: *mut (),
+ reg_save_area: *mut (),
+}
+
+/// A wrapper for a `va_list`
+// The lang item lets the compiler refer to this type when type-checking
+// and lowering the `va_*` intrinsics (see the codegen/typeck changes in
+// this patch). `repr(transparent)` gives `VaList` exactly the ABI of the
+// inner `&mut VaListImpl`.
+#[lang = "va_list"]
+#[derive(Debug)]
+#[unstable(feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "27745")]
+#[repr(transparent)]
+#[cfg(not(stage0))]
+pub struct VaList<'a>(&'a mut VaListImpl);
+
+// The VaArgSafe trait needs to be used in public interfaces, however, the trait
+// itself must not be allowed to be used outside this module. Allowing users to
+// implement the trait for a new type (thereby allowing the va_arg intrinsic to
+// be used on a new type) is likely to cause undefined behavior.
+//
+// FIXME(dlrobertson): In order to use the VaArgSafe trait in a public interface
+// but also ensure it cannot be used elsewhere, the trait needs to be public
+// within a private module. Once RFC 2145 has been implemented look into
+// improving this.
+mod sealed_trait {
+ /// Trait which whitelists the allowed types to be used with [VaList::arg]
+ ///
+ /// [VaList::arg]: struct.VaList.html#method.arg
+ #[unstable(feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "27745")]
+ pub trait VaArgSafe {}
+}
+
+// Expands to one `impl sealed_trait::VaArgSafe for $t {}` per listed type,
+// attaching the `c_variadic` unstable attribute to each impl.
+macro_rules! impl_va_arg_safe {
+ ($($t:ty),+) => {
+ $(
+ #[unstable(feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "27745")]
+ impl sealed_trait::VaArgSafe for $t {}
+ )+
+ }
+}
+
+// Primitive types that may be read with `VaList::arg`, grouped by
+// signedness. `f32` is deliberately absent: C variadic arguments promote
+// `float` to `double`, so only `f64` is valid here.
+impl_va_arg_safe!{i8, i16, i32, i64, isize}
+impl_va_arg_safe!{u8, u16, u32, u64, usize}
+impl_va_arg_safe!{f64}
+
+// Raw pointers of any pointee type may also be read from a `va_list`.
+#[unstable(feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "27745")]
+impl<T> sealed_trait::VaArgSafe for *mut T {}
+#[unstable(feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "27745")]
+impl<T> sealed_trait::VaArgSafe for *const T {}
+
+#[cfg(not(stage0))]
+impl<'a> VaList<'a> {
+ /// Advance to the next arg.
+ // Unsafe: the caller must guarantee a next argument of type `T`
+ // actually exists in the variadic call (standard `va_arg` contract).
+ #[unstable(feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "27745")]
+ pub unsafe fn arg<T: sealed_trait::VaArgSafe>(&mut self) -> T {
+ va_arg(self)
+ }
+
+ /// Copy the `va_list` at the current location.
+ // Runs `f` on a copy of the list so this list's position is left
+ // untouched; the copy is destroyed with `va_end` before returning.
+ #[unstable(feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "27745")]
+ pub unsafe fn copy<F, R>(&mut self, f: F) -> R
+ where F: for<'copy> FnOnce(VaList<'copy>) -> R {
+ // On pointer-like targets the `va_copy` intrinsic yields a
+ // `VaList` directly...
+ #[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"),
+ not(target_arch = "x86_64")),
+ windows))]
+ let mut ap = va_copy(self);
+ // ...while on register-save-area targets it yields the underlying
+ // `VaListImpl`, which must be wrapped in a fresh `VaList`.
+ #[cfg(all(any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"),
+ not(windows)))]
+ let mut ap_inner = va_copy(self);
+ #[cfg(all(any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"),
+ not(windows)))]
+ let mut ap = VaList(&mut ap_inner);
+ let ret = f(VaList(ap.0));
+ va_end(&mut ap);
+ ret
+ }
+}
+
+#[cfg(not(stage0))]
+extern "rust-intrinsic" {
+ /// Destroy the arglist `ap` after initialization with `va_start` or
+ /// `va_copy`.
+ fn va_end(ap: &mut VaList);
+
+ /// Copy the current location of arglist `src`, returning a new arglist
+ /// positioned at the same argument.
+ // Two signatures are needed because the copy's concrete type differs:
+ // pointer-like targets get a `VaList` back directly, while
+ // register-save-area targets get the `VaListImpl` structure itself
+ // (which `VaList::copy` then re-wraps).
+ #[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"),
+ not(target_arch = "x86_64")),
+ windows))]
+ fn va_copy<'a>(src: &VaList<'a>) -> VaList<'a>;
+ #[cfg(all(any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"),
+ not(windows)))]
+ fn va_copy(src: &VaList) -> VaListImpl;
+
+ /// Loads an argument of type `T` from the `va_list` `ap` and advances
+ /// `ap` to the next argument.
+ fn va_arg<T: sealed_trait::VaArgSafe>(ap: &mut VaList) -> T;
+}
IndexMutTraitLangItem, "index_mut", index_mut_trait, Target::Trait;
UnsafeCellTypeLangItem, "unsafe_cell", unsafe_cell_type, Target::Struct;
+ VaListTypeLangItem, "va_list", va_list, Target::Struct;
DerefTraitLangItem, "deref", deref_trait, Target::Trait;
DerefMutTraitLangItem, "deref_mut", deref_mut_trait, Target::Trait;
ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64);
ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128);
- ifn!("llvm.fshl.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
- ifn!("llvm.fshl.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
- ifn!("llvm.fshl.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
- ifn!("llvm.fshl.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
- ifn!("llvm.fshl.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
-
- ifn!("llvm.fshr.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
- ifn!("llvm.fshr.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
- ifn!("llvm.fshr.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
- ifn!("llvm.fshr.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
- ifn!("llvm.fshr.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
+ ifn!("llvm.fshl.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
+ ifn!("llvm.fshl.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
+ ifn!("llvm.fshl.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
+ ifn!("llvm.fshl.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
+ ifn!("llvm.fshl.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
+
+ ifn!("llvm.fshr.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
+ ifn!("llvm.fshr.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
+ ifn!("llvm.fshr.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
+ ifn!("llvm.fshr.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
+ ifn!("llvm.fshr.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1});
ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1});
ifn!("llvm.assume", fn(i1) -> void);
ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);
+ // variadic intrinsics
+ ifn!("llvm.va_start", fn(i8p) -> void);
+ ifn!("llvm.va_end", fn(i8p) -> void);
+ ifn!("llvm.va_copy", fn(i8p, i8p) -> void);
+
if self.sess().opts.debuginfo != DebugInfo::None {
ifn!("llvm.dbg.declare", fn(self.type_metadata(), self.type_metadata()) -> void);
ifn!("llvm.dbg.value", fn(self.type_metadata(), t_i64, self.type_metadata()) -> void);
use type_::Type;
use type_of::LayoutLlvmExt;
use rustc::ty::{self, Ty};
-use rustc::ty::layout::{LayoutOf, HasTyCtxt};
+use rustc::ty::layout::{self, LayoutOf, HasTyCtxt, Primitive};
use rustc_codegen_ssa::common::TypeKind;
use rustc::hir;
-use syntax::ast;
+use syntax::ast::{self, FloatTy};
use syntax::symbol::Symbol;
use builder::Builder;
use value::Value;
+use va_arg::emit_va_arg;
use rustc_codegen_ssa::traits::*;
let tp_ty = substs.type_at(0);
self.cx().const_usize(self.cx().size_of(tp_ty).bytes())
}
+ func @ "va_start" | func @ "va_end" => {
+ // Resolve the LLVM-level `va_list` pointer for the intrinsic call.
+ // NOTE(review): the choice between using the argument directly and
+ // loading through it is keyed on whether the intrinsic's result
+ // type is the `va_list` lang-item ADT — confirm this matches the
+ // typeck signatures for these intrinsics.
+ let va_list = match (tcx.lang_items().va_list(), &result.layout.ty.sty) {
+ (Some(did), ty::Adt(def, _)) if def.did == did => args[0].immediate(),
+ (Some(_), _) => self.load(args[0].immediate(),
+ tcx.data_layout.pointer_align.abi),
+ (None, _) => bug!("va_list language item must be defined")
+ };
+ // Forward to `llvm.va_start` / `llvm.va_end`.
+ let intrinsic = self.cx().get_intrinsic(&format!("llvm.{}", func));
+ self.call(intrinsic, &[va_list], None)
+ }
+ "va_copy" => {
+ let va_list = match (tcx.lang_items().va_list(), &result.layout.ty.sty) {
+ (Some(did), ty::Adt(def, _)) if def.did == did => args[0].immediate(),
+ (Some(_), _) => self.load(args[0].immediate(),
+ tcx.data_layout.pointer_align.abi),
+ (None, _) => bug!("va_list language item must be defined")
+ };
+ // `llvm.va_copy` writes into its first operand, so the result slot
+ // is passed as the destination and we return early — there is no
+ // value to store afterwards.
+ let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
+ self.call(intrinsic, &[llresult, va_list], None);
+ return;
+ }
+ "va_arg" => {
+ match fn_ty.ret.layout.abi {
+ layout::Abi::Scalar(ref scalar) => {
+ match scalar.value {
+ Primitive::Int(..) => {
+ if self.cx().size_of(ret_ty).bytes() < 4 {
+ // va_arg should not be called on an integer type
+ // less than 4 bytes in length. If it is, promote
+ // the integer to an `i32` and truncate the result
+ // back to the smaller type.
+ let promoted_result = emit_va_arg(self, args[0],
+ tcx.types.i32);
+ self.trunc(promoted_result, llret_ty)
+ } else {
+ emit_va_arg(self, args[0], ret_ty)
+ }
+ }
+ Primitive::Float(FloatTy::F64) |
+ Primitive::Pointer => {
+ emit_va_arg(self, args[0], ret_ty)
+ }
+ // `va_arg` should never be used with the return type f32.
+ // (C variadic arguments promote `float` to `double`.)
+ Primitive::Float(FloatTy::F32) => {
+ bug!("the va_arg intrinsic does not work with `f32`")
+ }
+ }
+ }
+ _ => {
+ bug!("the va_arg intrinsic does not work with non-scalar types")
+ }
+ }
+ }
"size_of_val" => {
let tp_ty = substs.type_at(0);
if let OperandValue::Pair(_, meta) = args[0].val {
mod type_;
mod type_of;
mod value;
+mod va_arg;
#[derive(Clone)]
pub struct LlvmCodegenBackend(());
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use builder::Builder;
+use rustc_codegen_ssa::mir::operand::OperandRef;
+use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods};
+use rustc::ty::layout::{Align, HasDataLayout, HasTyCtxt, LayoutOf, Size};
+use rustc::ty::Ty;
+use type_::Type;
+use type_of::LayoutLlvmExt;
+use value::Value;
+
+#[allow(dead_code)]
+// Round `addr` up to `align` with the classic `(p + a - 1) & -a` bit trick,
+// going through an integer round-trip (`ptrtoint`/`inttoptr`).
+//
+// NOTE(review): `ptr_as_int` is `isize`-wide but the constants are built
+// with `const_i32` — confirm the operand widths agree on 64-bit targets,
+// since LLVM `add`/`and` require matching integer types.
+fn round_pointer_up_to_alignment(
+ bx: &mut Builder<'a, 'll, 'tcx>,
+ addr: &'ll Value,
+ align: Align,
+ ptr_ty: &'ll Type
+) -> &'ll Value {
+ let mut ptr_as_int = bx.ptrtoint(addr, bx.cx().type_isize());
+ ptr_as_int = bx.add(ptr_as_int, bx.cx().const_i32(align.bytes() as i32 - 1));
+ ptr_as_int = bx.and(ptr_as_int, bx.cx().const_i32(-(align.bytes() as i32)));
+ bx.inttoptr(ptr_as_int, ptr_ty)
+}
+
+// Emit the load of one variadic argument for targets whose `va_list` is a
+// simple pointer into the argument area: read the current pointer,
+// optionally round it up for over-aligned arguments, bump it past the slot,
+// and return the typed address together with the alignment it is known to
+// have.
+fn emit_direct_ptr_va_arg(
+ bx: &mut Builder<'a, 'll, 'tcx>,
+ list: OperandRef<'tcx, &'ll Value>,
+ llty: &'ll Type,
+ size: Size,
+ align: Align,
+ slot_size: Align,
+ allow_higher_align: bool
+) -> (&'ll Value, Align) {
+ // View the `va_list` operand as an `i8**` so the current argument
+ // pointer can be loaded from and stored back through it.
+ let va_list_ptr_ty = bx.cx().type_ptr_to(bx.cx.type_i8p());
+ let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
+ bx.bitcast(list.immediate(), va_list_ptr_ty)
+ } else {
+ list.immediate()
+ };
+
+ let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+
+ // Arguments over-aligned relative to a slot may need the pointer
+ // rounded up first, when the ABI permits higher alignment.
+ let (addr, addr_align) = if allow_higher_align && align > slot_size {
+ (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
+ } else {
+ (ptr, slot_size)
+ };
+
+
+ // Advance the stored pointer past this argument's slot-rounded size.
+ let aligned_size = size.align_to(slot_size).bytes() as i32;
+ let full_direct_size = bx.cx().const_i32(aligned_size);
+ let next = bx.inbounds_gep(addr, &[full_direct_size]);
+ bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+
+ // On big-endian targets a value smaller than its slot occupies the
+ // high-order bytes, so step past the padding before reading.
+ if size.bytes() < slot_size.bytes() &&
+ &*bx.tcx().sess.target.target.target_endian == "big" {
+ let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
+ let adjusted = bx.inbounds_gep(addr, &[adjusted_size]);
+ (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
+ } else {
+ (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
+ }
+}
+
+// Load one variadic argument via the direct-pointer scheme, dereferencing
+// twice when `indirect` is set (i.e. the slot holds a pointer to the value
+// rather than the value itself, as on Windows x86_64 for large values).
+fn emit_ptr_va_arg(
+ bx: &mut Builder<'a, 'll, 'tcx>,
+ list: OperandRef<'tcx, &'ll Value>,
+ target_ty: Ty<'tcx>,
+ indirect: bool,
+ slot_size: Align,
+ allow_higher_align: bool
+) -> &'ll Value {
+ let layout = bx.cx.layout_of(target_ty);
+ // For indirect arguments the slot contains a pointer, so the size and
+ // alignment used are those of a pointer, not of the target type.
+ let (llty, size, align) = if indirect {
+ (bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).llvm_type(bx.cx),
+ bx.cx.data_layout().pointer_size,
+ bx.cx.data_layout().pointer_align)
+ } else {
+ (layout.llvm_type(bx.cx),
+ layout.size,
+ layout.align)
+ };
+ let (addr, addr_align) = emit_direct_ptr_va_arg(bx, list, llty, size, align.abi,
+ slot_size, allow_higher_align);
+ if indirect {
+ let tmp_ret = bx.load(addr, addr_align);
+ bx.load(tmp_ret, align.abi)
+ } else {
+ bx.load(addr, addr_align)
+ }
+}
+
+// Entry point for lowering the `va_arg` intrinsic: dispatch on target
+// architecture and OS flavor to a hand-rolled pointer-bump implementation
+// where LLVM's `va_arg` instruction is known to be inadequate.
+pub(super) fn emit_va_arg(
+ bx: &mut Builder<'a, 'll, 'tcx>,
+ addr: OperandRef<'tcx, &'ll Value>,
+ target_ty: Ty<'tcx>,
+) -> &'ll Value {
+ // Determine the va_arg implementation to use. The LLVM va_arg instruction
+ // is lacking in some instances, so we should only use it as a fallback.
+ let arch = &bx.cx.tcx.sess.target.target.arch;
+ match (&**arch,
+ bx.cx.tcx.sess.target.target.options.is_like_windows) {
+ // Windows x86: 4-byte slots, no over-alignment allowed.
+ ("x86", true) => {
+ emit_ptr_va_arg(bx, addr, target_ty, false,
+ Align::from_bytes(4).unwrap(), false)
+ }
+ // Windows x86_64: values larger than 8 bytes or with a
+ // non-power-of-two size are passed indirectly (the slot holds a
+ // pointer to the value).
+ ("x86_64", true) => {
+ let target_ty_size = bx.cx.size_of(target_ty).bytes();
+ let indirect = target_ty_size > 8 || !target_ty_size.is_power_of_two();
+ emit_ptr_va_arg(bx, addr, target_ty, indirect,
+ Align::from_bytes(8).unwrap(), false)
+ }
+ // Non-Windows x86: 4-byte slots, over-aligned arguments permitted.
+ ("x86", false) => {
+ emit_ptr_va_arg(bx, addr, target_ty, false,
+ Align::from_bytes(4).unwrap(), true)
+ }
+ // Everything else falls back to LLVM's own `va_arg` instruction.
+ _ => {
+ // On register-save-area targets the Rust-level `va_list` wraps
+ // a reference to the implementation struct, so load through it
+ // first.
+ let va_list = if (bx.tcx().sess.target.target.arch == "aarch64" ||
+ bx.tcx().sess.target.target.arch == "x86_64" ||
+ bx.tcx().sess.target.target.arch == "powerpc") &&
+ !bx.tcx().sess.target.target.options.is_like_windows {
+ bx.load(addr.immediate(), bx.tcx().data_layout.pointer_align.abi)
+ } else {
+ addr.immediate()
+ };
+ bx.va_arg(va_list, bx.cx.layout_of(target_ty).llvm_type(bx.cx))
+ }
+ }
+}
+
use intrinsics;
use rustc::traits::{ObligationCause, ObligationCauseCode};
use rustc::ty::{self, TyCtxt, Ty};
+use rustc::ty::subst::Subst;
use rustc::util::nodemap::FxHashMap;
use require_same_types;
it: &hir::ForeignItem) {
let param = |n| tcx.mk_ty_param(n, Symbol::intern(&format!("P{}", n)).as_interned_str());
let name = it.name.as_str();
+
+ let mk_va_list_ty = || {
+ // Builds `&mut VaList<'r>` for intrinsic signatures: `'r` is a fresh
+ // late-bound region substituted into the lang-item type, and the
+ // outer `&mut` uses the bound environment region. Returns `None`
+ // when the `va_list` lang item is not defined.
+ tcx.lang_items().va_list().map(|did| {
+ let region = tcx.mk_region(ty::ReLateBound(ty::INNERMOST, ty::BrAnon(0)));
+ let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
+ let va_list_ty = tcx.type_of(did).subst(tcx, &[region.into()]);
+ tcx.mk_mut_ref(tcx.mk_region(env_region), va_list_ty)
+ })
+ };
+
let (n_tps, inputs, output, unsafety) = if name.starts_with("atomic_") {
let split : Vec<&str> = name.split('_').collect();
assert!(split.len() >= 2, "Atomic intrinsic in an incorrect format");
(0, vec![tcx.mk_fn_ptr(fn_ty), mut_u8, mut_u8], tcx.types.i32)
}
+ "va_start" | "va_end" => {
+ // Signature: fn(&mut VaList) -> ().
+ match mk_va_list_ty() {
+ Some(va_list_ty) => (0, vec![va_list_ty], tcx.mk_unit()),
+ None => bug!("va_list lang_item must be defined to use va_list intrinsics")
+ }
+ }
+
+ "va_copy" => {
+ // Signature: fn(&VaList) -> VaListImpl on register-save-area
+ // targets (where `VaList`'s first field is a reference to an ADT),
+ // otherwise fn(&VaList) -> VaList. The return type is recovered by
+ // peeling the reference out of the lang-item struct's first field.
+ match tcx.lang_items().va_list() {
+ Some(did) => {
+ let region = tcx.mk_region(ty::ReLateBound(ty::INNERMOST, ty::BrAnon(0)));
+ let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
+ let va_list_ty = tcx.type_of(did).subst(tcx, &[region.into()]);
+ let ret_ty = match va_list_ty.sty {
+ ty::Adt(def, _) if def.is_struct() => {
+ let fields = &def.non_enum_variant().fields;
+ match tcx.type_of(fields[0].did).subst(tcx, &[region.into()]).sty {
+ ty::Ref(_, element_ty, _) => match element_ty.sty {
+ ty::Adt(..) => element_ty,
+ _ => va_list_ty
+ }
+ _ => bug!("va_list structure is invalid")
+ }
+ }
+ _ => {
+ bug!("va_list structure is invalid")
+ }
+ };
+ (0, vec![tcx.mk_imm_ref(tcx.mk_region(env_region), va_list_ty)], ret_ty)
+ }
+ None => bug!("va_list lang_item must be defined to use va_list intrinsics")
+ }
+ }
+
+ "va_arg" => {
+ // Signature: fn(&mut VaList) -> T, with one type parameter T.
+ match mk_va_list_ty() {
+ Some(va_list_ty) => (1, vec![va_list_ty], param(0)),
+ None => bug!("va_list lang_item must be defined to use va_list intrinsics")
+ }
+ }
+
"nontemporal_store" => {
(1, vec![ tcx.mk_mut_ptr(param(0)), param(0) ], tcx.mk_unit())
}
#[stable(feature = "raw_os", since = "1.1.0")]
pub use core::ffi::c_void;
+#[cfg(not(stage0))]
+#[unstable(feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "27745")]
+pub use core::ffi::VaList;
+
mod c_str;
mod os_str;
#![feature(array_error_internals)]
#![feature(asm)]
#![feature(box_syntax)]
+#![feature(c_variadic)]
#![feature(cfg_target_has_atomic)]
#![feature(cfg_target_thread_local)]
#![feature(cfg_target_vendor)]
"src/libcore/tests",
"src/liballoc/tests/lib.rs",
+ // The `VaList` implementation must contain platform-specific code.
+ // The Windows implementation of a `va_list` is always a character
+ // pointer regardless of the target architecture. As a result,
+ // we must use `#[cfg(windows)]` to conditionally compile the
+ // correct `VaList` structure for Windows.
+ "src/libcore/ffi.rs",
+
// non-std crates
"src/test",
"src/tools",