1 // Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution.
4 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
5 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
7 // option. This file may not be copied, modified, or distributed
8 // except according to those terms.
11 use crate::rustc::hir::*;
12 use crate::rustc::lint::{LateContext, LateLintPass, LintArray, LintPass};
13 use crate::rustc::{declare_tool_lint, lint_array};
14 use if_chain::if_chain;
16 use crate::syntax::ast::{Name, UintTy};
17 use crate::utils::{contains_name, get_pat_name, match_type, paths, single_segment_path, snippet, span_lint_and_sugg,
20 /// **What it does:** Checks for naive byte counts
22 /// **Why is this bad?** The [`bytecount`](https://crates.io/crates/bytecount)
23 /// crate has methods to count your bytes faster, especially for large slices.
25 /// **Known problems:** If you have predominantly small slices, the
26 /// `bytecount::count(..)` method may actually be slower. However, if you can
27 /// ensure that less than 2³²-1 matches arise, the `naive_count_32(..)` can be
28 /// faster in those cases.
33 /// &my_data.filter(|&x| x == 0u8).count() // use bytecount::count instead
35 declare_clippy_lint! {
38 "use of naive `<slice>.filter(|&x| x == y).count()` to count byte values"
41 #[derive(Copy, Clone)]
44 impl LintPass for ByteCount {
45 fn get_lints(&self) -> LintArray {
46 lint_array!(NAIVE_BYTECOUNT)
// Walks every expression, looking for the shape
// `<haystack>.filter(|&x| x == needle).count()` and suggesting
// `bytecount::count(haystack, needle)` instead.
// NOTE(review): this excerpt is missing interior lines (the `if_chain!`
// wrappers, a type check on the filter receiver, `then`/`else` arms and
// several closing braces are not visible). Comments below describe only
// what the visible code shows.
50 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for ByteCount {
51 fn check_expr(&mut self, cx: &LateContext<'_, '_>, expr: &Expr) {
// Outermost call must be `.count()` with only the receiver in its
// argument list (HIR method calls carry the receiver as args[0]).
53 if let ExprKind::MethodCall(ref count, _, ref count_args) = expr.node;
54 if count.ident.name == "count";
55 if count_args.len() == 1;
// The receiver of `.count()` must itself be `.filter(closure)`:
// receiver plus exactly one argument, and that argument a closure literal.
56 if let ExprKind::MethodCall(ref filter, _, ref filter_args) = count_args[0].node;
57 if filter.ident.name == "filter";
58 if filter_args.len() == 2;
59 if let ExprKind::Closure(_, _, body_id, _, _) = filter_args[1].node;
// Resolve the closure body; it must take a single parameter bound to a
// plain name, and its body must be an `==` comparison.
61 let body = cx.tcx.hir.body(body_id);
63 if body.arguments.len() == 1;
64 if let Some(argname) = get_pat_name(&body.arguments[0].pat);
65 if let ExprKind::Binary(ref op, ref l, ref r) = body.value.node;
66 if op.node == BinOpKind::Eq;
// NOTE(review): orphaned line — part of a check (not visible here) on the
// type of the filter receiver; `walk_ptrs_ty` peels references first.
68 walk_ptrs_ty(cx.tables.expr_ty(&filter_args[0])),
// Decide which side of the `==` is the needle: the opposite side must be
// exactly the closure parameter (see `check_arg`), and the needle side is
// kept. The fallback `_` arm (a `return`) is not visible in this excerpt.
71 let needle = match get_path_name(l) {
72 Some(name) if check_arg(name, argname, r) => r,
73 _ => match get_path_name(r) {
74 Some(name) if check_arg(name, argname, l) => l,
// Only fire when the needle is a `u8` (after peeling references) —
// bytecount counts byte values, per the lint docs above.
78 if ty::Uint(UintTy::U8) != walk_ptrs_ty(cx.tables.expr_ty(needle)).sty {
// If the filter receiver is `x.iter()` / `x.iter_mut()` (receiver-only
// call), suggest counting over `x` directly; the else arms that fall back
// to the receiver expression itself are not visible in this excerpt.
81 let haystack = if let ExprKind::MethodCall(ref path, _, ref args) =
83 let p = path.ident.name;
84 if (p == "iter" || p == "iter_mut") && args.len() == 1 {
// Emit the lint with a ready-made `bytecount::count(haystack, needle)`
// replacement built from the original snippets.
92 span_lint_and_sugg(cx,
95 "You appear to be counting bytes the naive way",
96 "Consider using the bytecount crate",
97 format!("bytecount::count({}, {})",
98 snippet(cx, haystack.span, ".."),
99 snippet(cx, needle.span, "..")));
107 fn check_arg(name: Name, arg: Name, needle: &Expr) -> bool {
108 name == arg && !contains_name(name, needle)
111 fn get_path_name(expr: &Expr) -> Option<Name> {
113 ExprKind::Box(ref e) | ExprKind::AddrOf(_, ref e) | ExprKind::Unary(UnOp::UnDeref, ref e) => get_path_name(e),
114 ExprKind::Block(ref b, _) => if b.stmts.is_empty() {
115 b.expr.as_ref().and_then(|p| get_path_name(p))
119 ExprKind::Path(ref qpath) => single_segment_path(qpath).map(|ps| ps.ident.name),