6 use rustc::hir::intravisit::FnKind;
7 use rustc::lint::{LateContext, LateLintPass, LintArray, LintPass};
8 use rustc::{declare_tool_lint, lint_array};
9 use if_chain::if_chain;
10 use rustc::ty::TyKind;
11 use rustc::session::config::Config as SessionConfig;
12 use rustc_target::spec::abi::Abi;
13 use rustc_target::abi::LayoutOf;
14 use syntax::ast::NodeId;
16 use crate::utils::{in_macro, is_copy, is_self, span_lint_and_sugg, snippet};
18 /// **What it does:** Checks for functions taking arguments by reference, where
19 /// the argument type is `Copy` and small enough to be more efficient to always
22 /// **Why is this bad?** In many calling conventions instances of structs will
23 /// be passed through registers if they fit into two or fewer general-purpose
26 /// **Known problems:** This lint is target register size dependent; it is
27 /// limited to 32-bit to try to reduce portability problems between 32 and
28 /// 64-bit, but if you are compiling for 8 or 16-bit targets then the limit
29 /// will be different.
31 /// The configuration option `trivial_copy_size_limit` can be set to override
32 /// this limit for a project.
34 /// This lint attempts to allow passing arguments by reference if a reference
35 /// to that argument is returned. This is implemented by comparing the lifetime
36 /// of the argument and return value for equality. However, this can cause
37 /// false positives in cases involving multiple lifetimes that are bounded by
43 /// assert_eq!(v, 42);
47 /// assert_eq!(v, 42);
// Lint declaration: name and the short description shown in lint listings.
50 declare_clippy_lint! {
51 pub TRIVIALLY_COPY_PASS_BY_REF,
53 "functions taking small copyable arguments by reference"
56 pub struct TriviallyCopyPassByRef {
60 impl TriviallyCopyPassByRef {
/// Builds the pass.
///
/// `limit` is the user override (the `trivial_copy_size_limit`
/// configuration option mentioned in the lint docs above); when `None`,
/// a default is computed from the target's `usize` bit width below.
61 pub fn new(limit: Option<u64>, target: &SessionConfig) -> Self {
62 let limit = limit.unwrap_or_else(|| {
// `expect` is safe: every supported target defines a usize width.
63 let bit_width = target.usize_ty.bit_width().expect("usize should have a width") as u64;
64 // Cap the calculated bit width at 32-bits to reduce
65 // portability problems between 32 and 64-bit targets
66 let bit_width = cmp::min(bit_width, 32);
// Convert bits to bytes for comparison against layout sizes.
67 let byte_width = bit_width / 8;
68 // Use a limit of 2 times the register bit width
75 impl LintPass for TriviallyCopyPassByRef {
// Registers the single lint this pass can emit.
76 fn get_lints(&self) -> LintArray {
77 lint_array![TRIVIALLY_COPY_PASS_BY_REF]
81 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TriviallyCopyPassByRef {
84 cx: &LateContext<'a, 'tcx>,
// Only plain Rust-ABI free functions are candidates: a non-Rust ABI
// fixes the calling convention, and `proc_macro_derive` entry points
// have a signature the user cannot change.
96 FnKind::ItemFn(.., header, _, attrs) => {
97 if header.abi != Abi::Rust {
101 if a.meta_item_list().is_some() && a.name() == "proc_macro_derive" {
106 FnKind::Method(..) => (),
// NOTE(review): the `Some(_)` position in the matched `ItemKind::Impl`
// presumably carries the trait ref, i.e. this excludes trait impls
// whose method signatures are not freely changeable — confirm against
// the rustc HIR definition.
110 // Exclude non-inherent impls
111 if let Some(Node::Item(item)) = cx.tcx.hir.find(cx.tcx.hir.get_parent_node(node_id)) {
112 if matches!(item.node, ItemKind::Impl(_, _, _, _, Some(_), _, _) |
119 let fn_def_id = cx.tcx.hir.local_def_id(node_id);
121 let fn_sig = cx.tcx.fn_sig(fn_def_id);
// Erase binders so input/output region values can be compared directly.
122 let fn_sig = cx.tcx.erase_late_bound_regions(&fn_sig);
124 // Use lifetimes to determine if we're returning a reference to the
125 // argument. In that case we can't switch to pass-by-value as the
126 // argument will not live long enough.
127 let output_lts = match fn_sig.output().sty {
128 TyKind::Ref(output_lt, _, _) => vec![output_lt],
129 TyKind::Adt(_, substs) => substs.regions().collect(),
// Walk the HIR argument types, the semantic input types, and the body's
// argument patterns in lockstep — they are parallel per parameter.
133 for ((input, &ty), arg) in decl.inputs.iter().zip(fn_sig.inputs()).zip(&body.arguments) {
134 // All spans generated from a proc-macro invocation are the same...
135 if span == input.span {
// Visible conditions for firing (others may be elided from this view):
// the argument is a shared immutable reference, its lifetime does not
// flow into the return type, the pointee's layout size is within the
// configured byte limit, and the HIR type is a plain `&T` we can
// rewrite textually.
140 if let TyKind::Ref(input_lt, ty, Mutability::MutImmutable) = ty.sty;
141 if !output_lts.contains(&input_lt);
143 if let Some(size) = cx.layout_of(ty).ok().map(|l| l.size.bytes());
144 if size <= self.limit;
145 if let hir::TyKind::Rptr(_, MutTy { ty: ref decl_ty, .. }) = input.node;
// Suggestion text: `self` parameters get special treatment; other
// parameters reuse the pointee type's source snippet verbatim.
147 let value_type = if is_self(arg) {
150 snippet(cx, decl_ty.span, "_").into()
154 TRIVIALLY_COPY_PASS_BY_REF,
156 "this argument is passed by reference, but would be more efficient if passed by value",
157 "consider passing by value instead",