-// Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
use crate::utils::{
contains_name, get_pat_name, match_type, paths, single_segment_path, snippet_with_applicability,
span_lint_and_sugg, walk_ptrs_ty,
};
use if_chain::if_chain;
-use rustc::hir::*;
-use rustc::lint::{LateContext, LateLintPass, LintArray, LintPass};
-use rustc::ty;
-use rustc::{declare_tool_lint, lint_array};
+use rustc_ast::ast::UintTy;
use rustc_errors::Applicability;
-use syntax::ast::{Name, UintTy};
+use rustc_hir::{BinOpKind, BorrowKind, Expr, ExprKind, UnOp};
+use rustc_lint::{LateContext, LateLintPass};
+use rustc_middle::ty;
+use rustc_session::{declare_lint_pass, declare_tool_lint};
+use rustc_span::Symbol;
-/// **What it does:** Checks for naive byte counts
-///
-/// **Why is this bad?** The [`bytecount`](https://crates.io/crates/bytecount)
-/// crate has methods to count your bytes faster, especially for large slices.
-///
-/// **Known problems:** If you have predominantly small slices, the
-/// `bytecount::count(..)` method may actually be slower. However, if you can
-/// ensure that less than 2³²-1 matches arise, the `naive_count_32(..)` can be
-/// faster in those cases.
-///
-/// **Example:**
-///
-/// ```rust
-/// &my_data.filter(|&x| x == 0u8).count() // use bytecount::count instead
-/// ```
declare_clippy_lint! {
+ /// **What it does:** Checks for naive byte counts
+ ///
+ /// **Why is this bad?** The [`bytecount`](https://crates.io/crates/bytecount)
+ /// crate has methods to count your bytes faster, especially for large slices.
+ ///
+ /// **Known problems:** If you have predominantly small slices, the
+ /// `bytecount::count(..)` method may actually be slower. However, if you can
+ /// ensure that fewer than 2³²-1 matches arise, the `naive_count_32(..)` can be
+ /// faster in those cases.
+ ///
+ /// **Example:**
+ ///
+ /// ```rust
+ /// # let vec = vec![1_u8];
+ /// &vec.iter().filter(|x| **x == 0u8).count(); // use bytecount::count instead
+ /// ```
pub NAIVE_BYTECOUNT,
perf,
"use of naive `<slice>.filter(|&x| x == y).count()` to count byte values"
}
-#[derive(Copy, Clone)]
-pub struct ByteCount;
-
-impl LintPass for ByteCount {
- fn get_lints(&self) -> LintArray {
- lint_array!(NAIVE_BYTECOUNT)
- }
-}
+declare_lint_pass!(ByteCount => [NAIVE_BYTECOUNT]);
impl<'a, 'tcx> LateLintPass<'a, 'tcx> for ByteCount {
- fn check_expr(&mut self, cx: &LateContext<'_, '_>, expr: &Expr) {
+ fn check_expr(&mut self, cx: &LateContext<'_, '_>, expr: &Expr<'_>) {
if_chain! {
- if let ExprKind::MethodCall(ref count, _, ref count_args) = expr.node;
- if count.ident.name == "count";
+ if let ExprKind::MethodCall(ref count, _, ref count_args) = expr.kind;
+ if count.ident.name == sym!(count);
if count_args.len() == 1;
- if let ExprKind::MethodCall(ref filter, _, ref filter_args) = count_args[0].node;
- if filter.ident.name == "filter";
+ if let ExprKind::MethodCall(ref filter, _, ref filter_args) = count_args[0].kind;
+ if filter.ident.name == sym!(filter);
if filter_args.len() == 2;
- if let ExprKind::Closure(_, _, body_id, _, _) = filter_args[1].node;
+ if let ExprKind::Closure(_, _, body_id, _, _) = filter_args[1].kind;
then {
let body = cx.tcx.hir().body(body_id);
if_chain! {
- if body.arguments.len() == 1;
- if let Some(argname) = get_pat_name(&body.arguments[0].pat);
- if let ExprKind::Binary(ref op, ref l, ref r) = body.value.node;
+ if body.params.len() == 1;
+ if let Some(argname) = get_pat_name(&body.params[0].pat);
+ if let ExprKind::Binary(ref op, ref l, ref r) = body.value.kind;
if op.node == BinOpKind::Eq;
if match_type(cx,
walk_ptrs_ty(cx.tables.expr_ty(&filter_args[0])),
_ => { return; }
}
};
- if ty::Uint(UintTy::U8) != walk_ptrs_ty(cx.tables.expr_ty(needle)).sty {
+ if ty::Uint(UintTy::U8) != walk_ptrs_ty(cx.tables.expr_ty(needle)).kind {
return;
}
let haystack = if let ExprKind::MethodCall(ref path, _, ref args) =
- filter_args[0].node {
+ filter_args[0].kind {
let p = path.ident.name;
- if (p == "iter" || p == "iter_mut") && args.len() == 1 {
+ if (p == sym!(iter) || p == sym!(iter_mut)) && args.len() == 1 {
&args[0]
} else {
&filter_args[0]
} else {
&filter_args[0]
};
- let mut applicability = Applicability::MachineApplicable;
+ let mut applicability = Applicability::MaybeIncorrect;
span_lint_and_sugg(
cx,
NAIVE_BYTECOUNT,
}
}
-fn check_arg(name: Name, arg: Name, needle: &Expr) -> bool {
+fn check_arg(name: Symbol, arg: Symbol, needle: &Expr<'_>) -> bool {
name == arg && !contains_name(name, needle)
}
-fn get_path_name(expr: &Expr) -> Option<Name> {
- match expr.node {
- ExprKind::Box(ref e) | ExprKind::AddrOf(_, ref e) | ExprKind::Unary(UnOp::UnDeref, ref e) => get_path_name(e),
+fn get_path_name(expr: &Expr<'_>) -> Option<Symbol> {
+ match expr.kind {
+ ExprKind::Box(ref e) | ExprKind::AddrOf(BorrowKind::Ref, _, ref e) | ExprKind::Unary(UnOp::UnDeref, ref e) => {
+ get_path_name(e)
+ },
ExprKind::Block(ref b, _) => {
if b.stmts.is_empty() {
b.expr.as_ref().and_then(|p| get_path_name(p))