4 use crate::utils::{in_macro_or_desugar, is_copy, is_self_ty, snippet, span_lint_and_sugg};
5 use if_chain::if_chain;
8 use rustc::hir::intravisit::FnKind;
10 use rustc::lint::{LateContext, LateLintPass, LintArray, LintPass};
11 use rustc::session::config::Config as SessionConfig;
12 use rustc::ty::{self, FnSig};
13 use rustc::{declare_tool_lint, impl_lint_pass};
14 use rustc_errors::Applicability;
15 use rustc_target::abi::LayoutOf;
16 use rustc_target::spec::abi::Abi;
// Declares the TRIVIALLY_COPY_PASS_BY_REF lint and its rustdoc text.
// NOTE(review): this excerpt is fragmentary (interior doc lines, the example's
// `fn` headers, the lint category line, and the closing brace are elided) —
// confirm the full macro body against the complete file.
19 declare_clippy_lint! {
20 /// **What it does:** Checks for functions taking arguments by reference, where
21 /// the argument type is `Copy` and small enough to be more efficient to always
24 /// **Why is this bad?** In many calling conventions instances of structs will
25 /// be passed through registers if they fit into two or less general purpose
28 /// **Known problems:** This lint is target register size dependent, it is
29 /// limited to 32-bit to try and reduce portability problems between 32 and
30 /// 64-bit, but if you are compiling for 8 or 16-bit targets then the limit
31 /// will be different.
33 /// The configuration option `trivial_copy_size_limit` can be set to override
34 /// this limit for a project.
36 /// This lint attempts to allow passing arguments by reference if a reference
37 /// to that argument is returned. This is implemented by comparing the lifetime
38 /// of the argument and return value for equality. However, this can cause
39 /// false positives in cases involving multiple lifetimes that are bounded by
45 /// assert_eq!(v, 42);
49 /// assert_eq!(v, 42);
52 pub TRIVIALLY_COPY_PASS_BY_REF,
54 "functions taking small copyable arguments by reference"
// State for the lint pass. The field list is elided in this excerpt; a `limit`
// field (the maximum byte size below which pass-by-value is suggested) must
// exist, since `self.limit` is read in `check_poly_fn` below — confirm any
// additional fields against the full file.
57 pub struct TriviallyCopyPassByRef {
61 impl<'a, 'tcx> TriviallyCopyPassByRef {
// Builds the pass. `limit` is the configured `trivial_copy_size_limit`; when
// absent, a default is derived lazily from the target's `usize` bit width,
// capped at 32 bits for 32/64-bit portability and converted to bytes. The
// existing comment says the final limit is 2x the register width, but the
// closing expression and the struct construction are elided in this excerpt —
// confirm against the full file.
62 pub fn new(limit: Option<u64>, target: &SessionConfig) -> Self {
63 let limit = limit.unwrap_or_else(|| {
64 let bit_width = target.usize_ty.bit_width().expect("usize should have a width") as u64;
65 // Cap the calculated bit width at 32-bits to reduce
66 // portability problems between 32 and 64-bit targets
67 let bit_width = cmp::min(bit_width, 32);
68 let byte_width = bit_width / 8;
69 // Use a limit of 2 times the register bit width
// Checks one trait method: resolves its `DefId` from the HIR id, fetches the
// polymorphic signature, erases late-bound (lifetime) regions so lifetimes can
// be compared structurally, looks up the HIR declaration, and delegates to
// `check_poly_fn` with no span (trait items have no proc-macro span filter
// here). The `match` arms for `fn_decl_by_hir_id` are elided in this excerpt.
75 fn check_trait_method(&mut self, cx: &LateContext<'_, 'tcx>, item: &TraitItemRef) {
76 let method_def_id = cx.tcx.hir().local_def_id_from_hir_id(item.id.hir_id);
77 let method_sig = cx.tcx.fn_sig(method_def_id);
78 let method_sig = cx.tcx.erase_late_bound_regions(&method_sig);
80 let decl = match cx.tcx.hir().fn_decl_by_hir_id(item.id.hir_id) {
85 self.check_poly_fn(cx, &decl, &method_sig, None);
// Core check: for each by-reference argument that is small enough, suggest
// passing by value — unless the return type borrows from that argument.
// Several interior lines (other match arms, the if_chain/lint-emission
// scaffolding, closing braces) are elided in this excerpt.
88 fn check_poly_fn(&mut self, cx: &LateContext<'_, 'tcx>, decl: &FnDecl, sig: &FnSig<'tcx>, span: Option<Span>) {
89 // Use lifetimes to determine if we're returning a reference to the
90 // argument. In that case we can't switch to pass-by-value as the
91 // argument will not live long enough.
// Collect lifetimes appearing in the return type: a plain `&'a T` yields
// that one region; an ADT yields every region in its substs. Remaining
// arms (non-borrowing outputs) are elided here.
92 let output_lts = match sig.output().sty {
93 ty::Ref(output_lt, _, _) => vec![output_lt],
94 ty::Adt(_, substs) => substs.regions().collect(),
// Walk HIR parameter declarations in lockstep with the type-level inputs.
98 for (input, &ty) in decl.inputs.iter().zip(sig.inputs()) {
99 // All spans generated from a proc-macro invocation are the same...
// ...so if the function's span equals a parameter's span, this is
// proc-macro-generated code: bail out of the whole function.
101 Some(s) if s == input.span => return,
// Lint only when: the argument is an immutable reference, its lifetime
// does not escape via the return type, the pointee's layout size is at
// or below the configured limit, and the HIR type is a plain `&T`.
// (A `Copy` check presumably occurs in an elided condition — confirm.)
106 if let ty::Ref(input_lt, ty, Mutability::MutImmutable) = ty.sty;
107 if !output_lts.contains(&input_lt);
109 if let Some(size) = cx.layout_of(ty).ok().map(|l| l.size.bytes());
110 if size <= self.limit;
111 if let hir::TyKind::Rptr(_, MutTy { ty: ref decl_ty, .. }) = input.node;
// Suggestion text: `self` receivers get special handling (elided branch);
// otherwise reuse the declared pointee type's source snippet.
113 let value_type = if is_self_ty(decl_ty) {
116 snippet(cx, decl_ty.span, "_").into()
// Emit the diagnostic. Applicability is Unspecified because removing the
// `&` also requires updating call sites, which this lint does not rewrite.
120 TRIVIALLY_COPY_PASS_BY_REF,
122 "this argument is passed by reference, but would be more efficient if passed by value",
123 "consider passing by value instead",
125 Applicability::Unspecified,
// Runs the trait-method check over every method item of a trait declaration;
// non-method associated items (consts, types) are skipped.
132 fn check_trait_items(&mut self, cx: &LateContext<'_, '_>, trait_items: &[TraitItemRef]) {
133 for item in trait_items {
134 if let AssociatedItemKind::Method { .. } = item.kind {
135 self.check_trait_method(cx, item);
// Registers this pass as the provider of the TRIVIALLY_COPY_PASS_BY_REF lint.
141 impl_lint_pass!(TriviallyCopyPassByRef => [TRIVIALLY_COPY_PASS_BY_REF]);
// Lint-pass hooks. Interior lines (early returns, match scaffolding, the
// `check_fn` header and its remaining parameters, closing braces) are elided
// in this excerpt.
143 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TriviallyCopyPassByRef {
// Item hook: skip macro/desugared items, then check the methods of any
// trait declaration.
144 fn check_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx Item) {
145 if in_macro_or_desugar(item.span) {
148 if let ItemKind::Trait(_, _, _, _, ref trait_items) = item.node {
149 self.check_trait_items(cx, trait_items);
// Function hook (`check_fn` — header elided here): filters out functions the
// lint must not fire on before delegating to `check_poly_fn`.
155 cx: &LateContext<'a, 'tcx>,
// Skip macro-generated / desugared functions.
162 if in_macro_or_desugar(span) {
// Free functions: only lint the default Rust ABI (other ABIs have fixed
// calling conventions the author may be matching deliberately)...
167 FnKind::ItemFn(.., header, _, attrs) => {
168 if header.abi != Abi::Rust {
// ...and skip `#[proc_macro_derive]` functions, whose signature is
// dictated by the proc-macro interface.
172 if a.meta_item_list().is_some() && a.check_name(*sym::proc_macro_derive) {
177 FnKind::Method(..) => (),
181 // Exclude non-inherent impls
// Walk up to the enclosing item: methods inside trait impls (the
// `Some(_)` trait-ref case below) cannot change their signature freely,
// so they are excluded.
182 if let Some(Node::Item(item)) = cx
185 .find_by_hir_id(cx.tcx.hir().get_parent_node_by_hir_id(hir_id))
187 if matches!(item.node, ItemKind::Impl(_, _, _, _, Some(_), _, _) |
// Resolve the function's signature, erase late-bound regions so lifetimes
// compare structurally, and run the core check with the function's span
// (used inside check_poly_fn for the proc-macro span heuristic).
194 let fn_def_id = cx.tcx.hir().local_def_id_from_hir_id(hir_id);
196 let fn_sig = cx.tcx.fn_sig(fn_def_id);
197 let fn_sig = cx.tcx.erase_late_bound_regions(&fn_sig);
199 self.check_poly_fn(cx, decl, &fn_sig, Some(span));