1 #![allow(default_hash_types)]
4 use std::cmp::Ordering;
5 use std::collections::BTreeMap;
7 use if_chain::if_chain;
9 use rustc::hir::intravisit::{walk_body, walk_expr, walk_ty, FnKind, NestedVisitorMap, Visitor};
11 use rustc::lint::{in_external_macro, LateContext, LateLintPass, LintArray, LintContext, LintPass};
12 use rustc::ty::layout::LayoutOf;
13 use rustc::ty::{self, InferTy, Ty, TyCtxt, TypeckTables};
14 use rustc::{declare_lint_pass, declare_tool_lint, impl_lint_pass};
15 use rustc_errors::Applicability;
16 use rustc_target::spec::abi::Abi;
17 use rustc_typeck::hir_ty_to_ty;
18 use syntax::ast::{FloatTy, IntTy, UintTy};
19 use syntax::errors::DiagnosticBuilder;
20 use syntax::source_map::Span;
21 use syntax::symbol::Symbol;
23 use crate::consts::{constant, Constant};
24 use crate::utils::paths;
25 use crate::utils::sym;
27 clip, comparisons, differing_macro_contexts, higher, in_constant, in_macro_or_desugar, int_bits, last_path_segment,
28 match_def_path, match_path, multispan_sugg, same_tys, sext, snippet, snippet_opt, snippet_with_applicability,
29 span_help_and_lint, span_lint, span_lint_and_sugg, span_lint_and_then, unsext,
32 declare_clippy_lint! {
33 /// **What it does:** Checks for use of `Box<Vec<_>>` anywhere in the code.
35 /// **Why is this bad?** `Vec` already keeps its contents in a separate area on
36 /// the heap. So if you `Box` it, you just add another level of indirection
37 /// without any benefit whatsoever.
39 /// **Known problems:** None.
44 /// values: Box<Vec<Foo>>,
57 "usage of `Box<Vec<T>>`, vector elements are already on the heap"
60 declare_clippy_lint! {
61 /// **What it does:** Checks for use of `Vec<Box<T>>` where T: Sized anywhere in the code.
63 /// **Why is this bad?** `Vec` already keeps its contents in a separate area on
64 /// the heap. So if you `Box` its contents, you just add another level of indirection.
66 /// **Known problems:** Vec<Box<T: Sized>> makes sense if T is a large type (see #3530,
72 /// values: Vec<Box<i32>>,
85 "usage of `Vec<Box<T>>` where T: Sized, vector elements are already on the heap"
88 declare_clippy_lint! {
89 /// **What it does:** Checks for use of `Option<Option<_>>` in function signatures and type
92 /// **Why is this bad?** `Option<_>` represents an optional value. `Option<Option<_>>`
93 /// represents an optional optional value which is logically the same thing as an optional
94 /// value but has an unneeded extra level of wrapping.
96 /// **Known problems:** None.
100 /// fn x() -> Option<Option<u32>> {
106 "usage of `Option<Option<T>>`"
109 declare_clippy_lint! {
110 /// **What it does:** Checks for usage of any `LinkedList`, suggesting to use a
111 /// `Vec` or a `VecDeque` (formerly called `RingBuf`).
113 /// **Why is this bad?** Gankro says:
115 /// > The TL;DR of `LinkedList` is that it's built on a massive amount of
116 /// pointers and indirection.
117 /// > It wastes memory, it has terrible cache locality, and is all-around slow.
119 /// > "only" amortized for push/pop, should be faster in the general case for
120 /// almost every possible
121 /// > workload, and isn't even amortized at all if you can predict the capacity
124 /// > `LinkedList`s are only really good if you're doing a lot of merging or
125 /// splitting of lists.
126 /// > This is because they can just mangle some pointers instead of actually
127 /// copying the data. Even
128 /// > if you're doing a lot of insertion in the middle of the list, `RingBuf`
129 /// can still be better
130 /// > because of how expensive it is to seek to the middle of a `LinkedList`.
132 /// **Known problems:** False positives – the instances where using a
133 /// `LinkedList` makes sense are few and far between, but they can still happen.
137 /// let x = LinkedList::new();
141 "usage of LinkedList, usually a vector is faster, or a more specialized data structure like a VecDeque"
144 declare_clippy_lint! {
145 /// **What it does:** Checks for use of `&Box<T>` anywhere in the code.
147 /// **Why is this bad?** Any `&Box<T>` can also be a `&T`, which is more
150 /// **Known problems:** None.
154 /// fn foo(bar: &Box<T>) { ... }
160 /// fn foo(bar: &T) { ... }
164 "a borrow of a boxed type"
167 declare_lint_pass!(Types => [BOX_VEC, VEC_BOX, OPTION_OPTION, LINKEDLIST, BORROWED_BOX]);
169 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for Types {
// Entry points that feed syntactic (HIR) types into `check_ty` /
// `check_fn_decl`. NOTE(review): this view of the file is elided — lines are
// missing inside the bodies below; comments describe only what is visible.
170 fn check_fn(&mut self, cx: &LateContext<'_, '_>, _: FnKind<'_>, decl: &FnDecl, _: &Body, _: Span, id: HirId) {
171 // Skip trait implementations; see issue #605.
172 if let Some(hir::Node::Item(item)) = cx.tcx.hir().find_by_hir_id(cx.tcx.hir().get_parent_item(id)) {
// `Some(..)` in the trait-ref position means this fn lives in a trait impl,
// whose signature is dictated by the trait and should not be linted here.
173 if let ItemKind::Impl(_, _, _, _, Some(..), _, _) = item.node {
178 check_fn_decl(cx, decl);
// Struct (and enum-variant) field types get the full set of type lints.
181 fn check_struct_field(&mut self, cx: &LateContext<'_, '_>, field: &hir::StructField) {
182 check_ty(cx, &field.ty, false);
// Associated consts/types and method signatures in trait declarations.
185 fn check_trait_item(&mut self, cx: &LateContext<'_, '_>, item: &TraitItem) {
187 TraitItemKind::Const(ref ty, _) | TraitItemKind::Type(_, Some(ref ty)) => check_ty(cx, ty, false),
188 TraitItemKind::Method(ref sig, _) => check_fn_decl(cx, &sig.decl),
// Explicitly-annotated `let` bindings: `is_local = true` restricts the
// checks (per `check_ty`'s docs, only BORROWED_BOX applies to locals).
193 fn check_local(&mut self, cx: &LateContext<'_, '_>, local: &Local) {
194 if let Some(ref ty) = local.ty {
195 check_ty(cx, ty, true);
/// Runs the type lints over every argument type and the explicit return type
/// of a function declaration. `is_local = false`: signature types get the
/// full lint set, not just `BORROWED_BOX`.
200 fn check_fn_decl(cx: &LateContext<'_, '_>, decl: &FnDecl) {
201 for input in &decl.inputs {
202 check_ty(cx, input, false);
// Only an explicitly written return type is checked; a defaulted `()`
// return has no HIR type node to lint.
205 if let FunctionRetTy::Return(ref ty) = decl.output {
206 check_ty(cx, ty, false);
210 /// Checks if `qpath` has last segment with type parameter matching `path`
// e.g. for `Box<Vec<T>>` with `path = paths::VEC` this looks at the `Vec<T>`
// argument of the final `Box` segment.
211 fn match_type_parameter(cx: &LateContext<'_, '_>, qpath: &QPath, path: &[Symbol]) -> bool {
212 let last = last_path_segment(qpath);
214 if let Some(ref params) = last.args;
// `Fn(..) -> ..` sugar uses parenthesized args; those are not generic
// type parameters in the sense this helper matches.
215 if !params.parenthesized;
// Pick the first *type* argument, skipping lifetimes/consts.
216 if let Some(ty) = params.args.iter().find_map(|arg| match arg {
217 GenericArg::Type(ty) => Some(ty),
220 if let TyKind::Path(ref qpath) = ty.node;
// Resolve the parameter's path to a DefId and compare against `path`.
221 if let Some(did) = cx.tables.qpath_res(qpath, ty.hir_id).opt_def_id();
222 if match_def_path(cx, did, path);
230 /// Recursively check for `TypePass` lints in the given type. Stop at the first
233 /// The parameter `is_local` distinguishes the context of the type; types from
234 /// local bindings should only be checked for the `BORROWED_BOX` lint.
235 #[allow(clippy::too_many_lines)]
236 fn check_ty(cx: &LateContext<'_, '_>, hir_ty: &hir::Ty, is_local: bool) {
// Never lint types produced by macro expansion or desugaring.
237 if in_macro_or_desugar(hir_ty.span) {
// Plain path types (`Box<..>`, `Vec<..>`, `Option<..>`, `LinkedList<..>`)
// are only linted outside local bindings; locals fall through to recursion.
241 TyKind::Path(ref qpath) if !is_local => {
242 let hir_id = hir_ty.hir_id;
243 let res = cx.tables.qpath_res(qpath, hir_id);
244 if let Some(def_id) = res.opt_def_id() {
// BOX_VEC: `Box<Vec<T>>` — the Box adds a pointless extra allocation.
245 if Some(def_id) == cx.tcx.lang_items().owned_box() {
246 if match_type_parameter(cx, qpath, &*paths::VEC) {
251 "you seem to be trying to use `Box<Vec<T>>`. Consider using just `Vec<T>`",
252 "`Vec<T>` is already on the heap, `Box<Vec<T>>` makes an extra allocation.",
254 return; // don't recurse into the type
// VEC_BOX: `Vec<Box<T>>` where `T: Sized` — boxing sized elements only
// adds indirection (an unsized `T` legitimately needs the Box).
256 } else if match_def_path(cx, def_id, &*paths::VEC) {
258 // Get the _ part of Vec<_>
259 if let Some(ref last) = last_path_segment(qpath).args;
260 if let Some(ty) = last.args.iter().find_map(|arg| match arg {
261 GenericArg::Type(ty) => Some(ty),
264 // ty is now _ at this point
265 if let TyKind::Path(ref ty_qpath) = ty.node;
266 let res = cx.tables.qpath_res(ty_qpath, ty.hir_id);
267 if let Some(def_id) = res.opt_def_id();
268 if Some(def_id) == cx.tcx.lang_items().owned_box();
269 // At this point, we know ty is Box<T>, now get T
270 if let Some(ref last) = last_path_segment(ty_qpath).args;
271 if let Some(boxed_ty) = last.args.iter().find_map(|arg| match arg {
272 GenericArg::Type(ty) => Some(ty),
// Lower the HIR type to a semantic `Ty` so we can ask about `Sized`.
276 let ty_ty = hir_ty_to_ty(cx.tcx, boxed_ty);
277 if ty_ty.is_sized(cx.tcx.at(ty.span), cx.param_env) {
282 "`Vec<T>` is already on the heap, the boxing is unnecessary.",
284 format!("Vec<{}>", ty_ty),
285 Applicability::MachineApplicable,
287 return; // don't recurse into the type
// OPTION_OPTION: `Option<Option<T>>` — redundant nesting.
291 } else if match_def_path(cx, def_id, &*paths::OPTION) {
292 if match_type_parameter(cx, qpath, &*paths::OPTION) {
297 "consider using `Option<T>` instead of `Option<Option<T>>` or a custom \
298 enum if you need to distinguish all 3 cases",
300 return; // don't recurse into the type
// LINKEDLIST: almost always a `Vec`/`VecDeque` serves better.
302 } else if match_def_path(cx, def_id, &*paths::LINKED_LIST) {
307 "I see you're using a LinkedList! Perhaps you meant some other data structure?",
308 "a VecDeque might work",
310 return; // don't recurse into the type
// `<Ty as Trait>::Assoc`-style qualified path: recurse into the self type
// and every generic type argument of each path segment.
314 QPath::Resolved(Some(ref ty), ref p) => {
315 check_ty(cx, ty, is_local);
316 for ty in p.segments.iter().flat_map(|seg| {
319 .map_or_else(|| [].iter(), |params| params.args.iter())
320 .filter_map(|arg| match arg {
321 GenericArg::Type(ty) => Some(ty),
325 check_ty(cx, ty, is_local);
// Ordinary path: recurse only into the generic type arguments.
328 QPath::Resolved(None, ref p) => {
329 for ty in p.segments.iter().flat_map(|seg| {
332 .map_or_else(|| [].iter(), |params| params.args.iter())
333 .filter_map(|arg| match arg {
334 GenericArg::Type(ty) => Some(ty),
338 check_ty(cx, ty, is_local);
// `Ty::Assoc`: recurse into the base type and the segment's type args.
341 QPath::TypeRelative(ref ty, ref seg) => {
342 check_ty(cx, ty, is_local);
343 if let Some(ref params) = seg.args {
344 for ty in params.args.iter().filter_map(|arg| match arg {
345 GenericArg::Type(ty) => Some(ty),
348 check_ty(cx, ty, is_local);
// References get the dedicated `&Box<T>` handling.
354 TyKind::Rptr(ref lt, ref mut_ty) => check_ty_rptr(cx, hir_ty, is_local, lt, mut_ty),
// Slices, arrays, raw pointers and tuples: recurse into element types.
356 TyKind::Slice(ref ty) | TyKind::Array(ref ty, _) | TyKind::Ptr(MutTy { ref ty, .. }) => {
357 check_ty(cx, ty, is_local)
359 TyKind::Tup(ref tys) => {
361 check_ty(cx, ty, is_local);
/// Handles the reference case of `check_ty`: lints `&Box<T>` (BORROWED_BOX),
/// suggesting the plain `&T` / `&mut T` borrow, and otherwise recurses into
/// the referenced type.
368 fn check_ty_rptr(cx: &LateContext<'_, '_>, hir_ty: &hir::Ty, is_local: bool, lt: &Lifetime, mut_ty: &MutTy) {
369 match mut_ty.ty.node {
370 TyKind::Path(ref qpath) => {
371 let hir_id = mut_ty.ty.hir_id;
372 let def = cx.tables.qpath_res(qpath, hir_id);
// Only fire when the referent resolves to the `Box` lang item and we can
// extract its single (non-parenthesized) type parameter.
374 if let Some(def_id) = def.opt_def_id();
375 if Some(def_id) == cx.tcx.lang_items().owned_box();
376 if let QPath::Resolved(None, ref path) = *qpath;
377 if let [ref bx] = *path.segments;
378 if let Some(ref params) = bx.args;
379 if !params.parenthesized;
380 if let Some(inner) = params.args.iter().find_map(|arg| match arg {
381 GenericArg::Type(ty) => Some(ty),
385 if is_any_trait(inner) {
386 // Ignore `Box<Any>` types; see issue #1884 for details.
// Reproduce the original lifetime and mutability in the suggestion text.
390 let ltopt = if lt.is_elided() {
393 format!("{} ", lt.name.ident().as_str())
395 let mutopt = if mut_ty.mutbl == Mutability::MutMutable {
400 let mut applicability = Applicability::MachineApplicable;
405 "you seem to be trying to use `&Box<T>`. Consider using just `&T`",
411 &snippet_with_applicability(cx, inner.span, "..", &mut applicability)
413 Applicability::Unspecified,
415 return; // don't recurse into the type
// Not a `&Box<T>` match: still recurse into the referent.
418 check_ty(cx, &mut_ty.ty, is_local);
420 _ => check_ty(cx, &mut_ty.ty, is_local),
424 // Returns true if given type is `Any` trait.
// Used by `check_ty_rptr` so that `&Box<dyn Any>` is exempt from
// BORROWED_BOX (see issue #1884).
425 fn is_any_trait(t: &hir::Ty) -> bool {
427 if let TyKind::TraitObject(ref traits, _) = t.node;
// NOTE(review): `traits.len() >= 1` is the non-idiomatic spelling of
// `!traits.is_empty()` (clippy::len_zero) — worth cleaning up.
428 if traits.len() >= 1;
429 // Only Send/Sync can be used as additional traits, so it is enough to
430 // check only the first trait.
431 if match_path(&traits[0].trait_ref.path, &*paths::ANY_TRAIT);
440 declare_clippy_lint! {
441 /// **What it does:** Checks for binding a unit value.
443 /// **Why is this bad?** A unit value cannot usefully be used anywhere. So
444 /// binding one is kind of pointless.
446 /// **Known problems:** None.
456 "creating a let binding to a value of unit type, which usually can't be used afterwards"
459 declare_lint_pass!(LetUnitValue => [LET_UNIT_VALUE]);
461 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for LetUnitValue {
// Fires on `let x = <unit-typed expr>;` — binding `()` is almost always an
// accident (e.g. a stray semicolon).
462 fn check_stmt(&mut self, cx: &LateContext<'a, 'tcx>, stmt: &'tcx Stmt) {
463 if let StmtKind::Local(ref local) = stmt.node {
464 if is_unit(cx.tables.pat_ty(&local.pat)) {
// Skip macro-generated code and external macros.
465 if in_external_macro(cx.sess(), stmt.span) || in_macro_or_desugar(local.pat.span) {
// `for` loops desugar to a unit-valued `let`; don't lint those.
468 if higher::is_from_for_desugar(local) {
476 "this let-binding has unit value. Consider omitting `let {} =`",
477 snippet(cx, local.pat.span, "..")
485 declare_clippy_lint! {
486 /// **What it does:** Checks for comparisons to unit.
488 /// **Why is this bad?** Unit is always equal to itself, and thus is just a
489 /// clumsily written constant. Mostly this happens when someone accidentally
490 /// adds semicolons at the end of the operands.
492 /// **Known problems:** None.
520 "comparing unit values"
523 declare_lint_pass!(UnitCmp => [UNIT_CMP]);
525 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnitCmp {
// Comparisons between unit values have a constant result; report which
// boolean the comparison will always evaluate to.
526 fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
527 if in_macro_or_desugar(expr.span) {
// Checking only the left operand's type is enough: both sides of a
// well-typed comparison have the same type.
530 if let ExprKind::Binary(ref cmp, ref left, _) = expr.node {
532 if op.is_comparison() && is_unit(cx.tables.expr_ty(left)) {
// `() == ()`, `() <= ()`, `() >= ()` hold; the remaining comparison
// operators are always false.
533 let result = match op {
534 BinOpKind::Eq | BinOpKind::Le | BinOpKind::Ge => "true",
542 "{}-comparison of unit values detected. This will always be {}",
552 declare_clippy_lint! {
553 /// **What it does:** Checks for passing a unit value as an argument to a function without using a
554 /// unit literal (`()`).
556 /// **Why is this bad?** This is likely the result of an accidental semicolon.
558 /// **Known problems:** None.
569 "passing unit to a function"
572 declare_lint_pass!(UnitArg => [UNIT_ARG]);
574 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnitArg {
// Fires when a unit value that is not a literal `()` is passed as an
// argument — usually the result of an accidental trailing semicolon.
575 fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
576 if in_macro_or_desugar(expr.span) {
580 // apparently stuff in the desugaring of `?` can trigger this
581 // so check for that here
582 // only the calls to `Try::from_error` is marked as desugared,
583 // so we need to check both the current Expr and its parent.
584 if is_questionmark_desugar_marked_call(expr) {
588 let map = &cx.tcx.hir();
589 let opt_parent_node = map.find_by_hir_id(map.get_parent_node_by_hir_id(expr.hir_id));
590 if let Some(hir::Node::Expr(parent_expr)) = opt_parent_node;
591 if is_questionmark_desugar_marked_call(parent_expr);
// Inspect each argument of calls and method calls.
598 ExprKind::Call(_, ref args) | ExprKind::MethodCall(_, _, ref args) => {
600 if is_unit(cx.tables.expr_ty(arg)) && !is_unit_literal(arg) {
// A `?`-desugared match produces a unit argument we must not lint.
601 if let ExprKind::Match(.., match_source) = &arg.node {
602 if *match_source == MatchSource::TryDesugar {
611 "passing a unit value to a function",
612 "if you intended to pass a unit value, use a unit literal instead",
614 Applicability::MachineApplicable,
/// Returns whether `expr` is a call whose callee span is marked as coming
/// from the `?` operator desugaring (`Try::from_error` etc.).
624 fn is_questionmark_desugar_marked_call(expr: &Expr) -> bool {
625 use syntax_pos::hygiene::CompilerDesugaringKind;
626 if let ExprKind::Call(ref callee, _) = expr.node {
627 callee.span.is_compiler_desugaring(CompilerDesugaringKind::QuestionMark)
/// Returns whether the semantic type is the unit type `()` (an empty tuple).
633 fn is_unit(ty: Ty<'_>) -> bool {
635 ty::Tuple(slice) if slice.is_empty() => true,
/// Returns whether the expression is syntactically the unit literal `()`.
640 fn is_unit_literal(expr: &Expr) -> bool {
642 ExprKind::Tup(ref slice) if slice.is_empty() => true,
647 declare_clippy_lint! {
648 /// **What it does:** Checks for casts from any numerical to a float type where
649 /// the receiving type cannot store all values from the original type without
650 /// rounding errors. This possible rounding is to be expected, so this lint is
651 /// `Allow` by default.
653 /// Basically, this warns on casting any integer with 32 or more bits to `f32`
654 /// or any 64-bit integer to `f64`.
656 /// **Why is this bad?** It's not bad at all. But in some applications it can be
657 /// helpful to know where precision loss can take place. This lint can help find
658 /// those places in the code.
660 /// **Known problems:** None.
664 /// let x = u64::MAX;
667 pub CAST_PRECISION_LOSS,
669 "casts that cause loss of precision, e.g., `x as f32` where `x: u64`"
672 declare_clippy_lint! {
673 /// **What it does:** Checks for casts from a signed to an unsigned numerical
674 /// type. In this case, negative values wrap around to large positive values,
675 /// which can be quite surprising in practice. However, as the cast works as
676 /// defined, this lint is `Allow` by default.
678 /// **Why is this bad?** Possibly surprising results. You can activate this lint
679 /// as a one-time check to see where numerical wrapping can arise.
681 /// **Known problems:** None.
686 /// y as u128 // will return 18446744073709551615
690 "casts from signed types to unsigned types, e.g., `x as u32` where `x: i32`"
693 declare_clippy_lint! {
694 /// **What it does:** Checks for casts between numerical types that may
695 /// truncate large values. This is expected behavior, so the cast is `Allow` by
698 /// **Why is this bad?** In some problem domains, it is good practice to avoid
699 /// truncation. This lint can be activated to help assess where additional
700 /// checks could be beneficial.
702 /// **Known problems:** None.
706 /// fn as_u8(x: u64) -> u8 {
710 pub CAST_POSSIBLE_TRUNCATION,
712 "casts that may cause truncation of the value, e.g., `x as u8` where `x: u32`, or `x as i32` where `x: f32`"
715 declare_clippy_lint! {
716 /// **What it does:** Checks for casts from an unsigned type to a signed type of
717 /// the same size. Performing such a cast is a 'no-op' for the compiler,
718 /// i.e., nothing is changed at the bit level, and the binary representation of
719 /// the value is reinterpreted. This can cause wrapping if the value is too big
720 /// for the target signed type. However, the cast works as defined, so this lint
721 /// is `Allow` by default.
723 /// **Why is this bad?** While such a cast is not bad in itself, the results can
724 /// be surprising when this is not the intended behavior, as demonstrated by the
727 /// **Known problems:** None.
731 /// u32::MAX as i32 // will yield a value of `-1`
733 pub CAST_POSSIBLE_WRAP,
735 "casts that may cause wrapping around the value, e.g., `x as i32` where `x: u32` and `x > i32::MAX`"
738 declare_clippy_lint! {
739 /// **What it does:** Checks for casts between numerical types that may
740 /// be replaced by safe conversion functions.
742 /// **Why is this bad?** Rust's `as` keyword will perform many kinds of
743 /// conversions, including silently lossy conversions. Conversion functions such
744 /// as `i32::from` will only perform lossless conversions. Using the conversion
745 /// functions prevents conversions from turning into silent lossy conversions if
746 /// the types of the input expressions ever change, and make it easier for
747 /// people reading the code to know that the conversion is lossless.
749 /// **Known problems:** None.
753 /// fn as_u64(x: u8) -> u64 {
758 /// Using `::from` would look like this:
761 /// fn as_u64(x: u8) -> u64 {
767 "casts using `as` that are known to be lossless, e.g., `x as u64` where `x: u8`"
770 declare_clippy_lint! {
771 /// **What it does:** Checks for casts to the same type.
773 /// **Why is this bad?** It's just unnecessary.
775 /// **Known problems:** None.
779 /// let _ = 2i32 as i32
781 pub UNNECESSARY_CAST,
783 "cast to the same type, e.g., `x as i32` where `x: i32`"
786 declare_clippy_lint! {
787 /// **What it does:** Checks for casts from a less-strictly-aligned pointer to a
788 /// more-strictly-aligned pointer
790 /// **Why is this bad?** Dereferencing the resulting pointer may be undefined
793 /// **Known problems:** None.
797 /// let _ = (&1u8 as *const u8) as *const u16;
798 /// let _ = (&mut 1u8 as *mut u8) as *mut u16;
800 pub CAST_PTR_ALIGNMENT,
802 "cast from a pointer to a more-strictly-aligned pointer"
805 declare_clippy_lint! {
806 /// **What it does:** Checks for casts of function pointers to something other than usize
808 /// **Why is this bad?**
809 /// Casting a function pointer to anything other than usize/isize is not portable across
810 /// architectures, because you end up losing bits if the target type is too small or end up with a
811 /// bunch of extra bits that waste space and add more instructions to the final binary than
812 /// strictly necessary for the problem
814 /// Casting to isize also doesn't make sense since there are no signed addresses.
820 /// fn fun() -> i32 { 1 }
821 /// let a = fun as i64;
824 /// fn fun2() -> i32 { 1 }
825 /// let a = fun2 as usize;
827 pub FN_TO_NUMERIC_CAST,
829 "casting a function pointer to a numeric type other than usize"
832 declare_clippy_lint! {
833 /// **What it does:** Checks for casts of a function pointer to a numeric type not wide enough to
836 /// **Why is this bad?**
837 /// Such a cast discards some bits of the function's address. If this is intended, it would be more
838 /// clearly expressed by casting to usize first, then casting the usize to the intended type (with
839 /// a comment) to perform the truncation.
845 /// fn fn1() -> i16 {
848 /// let _ = fn1 as i32;
850 /// // Better: Cast to usize first, then comment with the reason for the truncation
851 /// fn fn2() -> i16 {
854 /// let fn_ptr = fn2 as usize;
855 /// let fn_ptr_truncated = fn_ptr as i32;
857 pub FN_TO_NUMERIC_CAST_WITH_TRUNCATION,
859 "casting a function pointer to a numeric type not wide enough to store the address"
862 /// Returns the size in bits of an integral type.
863 /// Will return 0 if the type is not an int or uint variant
// `isize`/`usize` resolve to the target's pointer width via the data layout.
864 fn int_ty_to_nbits(typ: Ty<'_>, tcx: TyCtxt<'_, '_, '_>) -> u64 {
866 ty::Int(i) => match i {
867 IntTy::Isize => tcx.data_layout.pointer_size.bits(),
874 ty::Uint(i) => match i {
875 UintTy::Usize => tcx.data_layout.pointer_size.bits(),
/// Returns whether the type is pointer-sized (`isize` or `usize`), i.e. its
/// width depends on the compilation target.
886 fn is_isize_or_usize(typ: Ty<'_>) -> bool {
888 ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize) => true,
/// Emits CAST_PRECISION_LOSS for an integer-to-float cast that cannot be
/// represented exactly. `cast_to_f64` selects the message wording for `f64`
/// (52 explicit mantissa bits) vs `f32` (23 explicit mantissa bits).
893 fn span_precision_loss_lint(cx: &LateContext<'_, '_>, expr: &Expr, cast_from: Ty<'_>, cast_to_f64: bool) {
894 let mantissa_nbits = if cast_to_f64 { 52 } else { 23 };
// `usize`/`isize` only lose precision on 64-bit targets when casting to
// f64, so the message is qualified accordingly.
895 let arch_dependent = is_isize_or_usize(cast_from) && cast_to_f64;
896 let arch_dependent_str = "on targets with 64-bit wide pointers ";
897 let from_nbits_str = if arch_dependent {
899 } else if is_isize_or_usize(cast_from) {
900 "32 or 64".to_owned()
902 int_ty_to_nbits(cast_from, cx.tcx).to_string()
909 "casting {0} to {1} causes a loss of precision {2}({0} is {3} bits wide, but {1}'s mantissa \
910 is only {4} bits wide)",
912 if cast_to_f64 { "f64" } else { "f32" },
913 if arch_dependent { arch_dependent_str } else { "" },
/// Returns whether the snippet of `op` is a parenthesized binary expression
/// whose outer parens become redundant once wrapped in a function call
/// (e.g. `(a + b) as u64` -> `u64::from(a + b)`).
920 fn should_strip_parens(op: &Expr, snip: &str) -> bool {
921 if let ExprKind::Binary(_, _, _) = op.node {
922 if snip.starts_with('(') && snip.ends_with(')') {
/// Emits CAST_LOSSLESS, suggesting `Ty::from(..)` instead of a lossless
/// `as` cast so the conversion stays checked if the operand type changes.
929 fn span_lossless_lint(cx: &LateContext<'_, '_>, expr: &Expr, op: &Expr, cast_from: Ty<'_>, cast_to: Ty<'_>) {
930 // Do not suggest using From in consts/statics until it is valid to do so (see #2267).
931 if in_constant(cx, expr.hir_id) {
934 // The suggestion is to use a function call, so if the original expression
935 // has parens on the outside, they are no longer needed.
936 let mut applicability = Applicability::MachineApplicable;
937 let opt = snippet_opt(cx, op.span);
938 let sugg = if let Some(ref snip) = opt {
939 if should_strip_parens(op, snip) {
940 &snip[1..snip.len() - 1]
// No snippet available (e.g. spans from expansion): fall back to a
// placeholder suggestion and mark it as such.
945 applicability = Applicability::HasPlaceholders;
954 "casting {} to {} may become silently lossy if you later change the type",
958 format!("{}::from({})", cast_to, sugg),
/// Emits CAST_SIGN_LOSS for signed-to-unsigned casts, except when the operand
/// is a compile-time constant known to be non-negative.
969 fn check_loss_of_sign(cx: &LateContext<'_, '_>, expr: &Expr, op: &Expr, cast_from: Ty<'_>, cast_to: Ty<'_>) {
// Only a signed source cast to an unsigned target can lose the sign.
970 if !cast_from.is_signed() || cast_to.is_signed() {
974 // don't lint for positive constants
975 let const_val = constant(cx, &cx.tables, op);
977 if let Some((const_val, _)) = const_val;
978 if let Constant::Int(n) = const_val;
979 if let ty::Int(ity) = cast_from.sty;
// Sign-extend the raw constant bits to decide whether it is >= 0.
980 if sext(cx.tcx, n, ity) >= 0;
990 &format!("casting {} to {} may lose the sign of the value", cast_from, cast_to),
/// Emits CAST_POSSIBLE_TRUNCATION and/or CAST_POSSIBLE_WRAP for an integer
/// cast, taking pointer-sized types into account: when `isize`/`usize` is
/// involved the verdict depends on the target, so the message carries an
/// "on targets with N-bit wide pointers" suffix.
994 fn check_truncation_and_wrapping(cx: &LateContext<'_, '_>, expr: &Expr, cast_from: Ty<'_>, cast_to: Ty<'_>) {
995 let arch_64_suffix = " on targets with 64-bit wide pointers";
996 let arch_32_suffix = " on targets with 32-bit wide pointers";
997 let cast_unsigned_to_signed = !cast_from.is_signed() && cast_to.is_signed();
998 let from_nbits = int_ty_to_nbits(cast_from, cx.tcx);
999 let to_nbits = int_ty_to_nbits(cast_to, cx.tcx);
// Decide (truncation?, its arch suffix, wrap?, its arch suffix) by whether
// each side is pointer-sized.
1000 let (span_truncation, suffix_truncation, span_wrap, suffix_wrap) =
1001 match (is_isize_or_usize(cast_from), is_isize_or_usize(cast_to)) {
// Both or neither pointer-sized: widths are directly comparable.
1002 (true, true) | (false, false) => (
1003 to_nbits < from_nbits,
1005 to_nbits == from_nbits && cast_unsigned_to_signed,
1015 to_nbits <= 32 && cast_unsigned_to_signed,
1021 cast_unsigned_to_signed,
1022 if from_nbits == 64 {
1029 if span_truncation {
1032 CAST_POSSIBLE_TRUNCATION,
1035 "casting {} to {} may truncate the value{}",
1038 match suffix_truncation {
1039 ArchSuffix::_32 => arch_32_suffix,
1040 ArchSuffix::_64 => arch_64_suffix,
1041 ArchSuffix::None => "",
1052 "casting {} to {} may wrap around the value{}",
1056 ArchSuffix::_32 => arch_32_suffix,
1057 ArchSuffix::_64 => arch_64_suffix,
1058 ArchSuffix::None => "",
/// Emits CAST_LOSSLESS when a widening integer cast between fixed-width
/// types (no `isize`/`usize`, no sign loss) could be written as `Ty::from`.
1065 fn check_lossless(cx: &LateContext<'_, '_>, expr: &Expr, op: &Expr, cast_from: Ty<'_>, cast_to: Ty<'_>) {
1066 let cast_signed_to_unsigned = cast_from.is_signed() && !cast_to.is_signed();
1067 let from_nbits = int_ty_to_nbits(cast_from, cx.tcx);
1068 let to_nbits = int_ty_to_nbits(cast_to, cx.tcx);
// Pointer-sized types are excluded because their width (and thus the
// existence of a `From` impl) is target-dependent.
1069 if !is_isize_or_usize(cast_from) && !is_isize_or_usize(cast_to) && from_nbits < to_nbits && !cast_signed_to_unsigned
1071 span_lossless_lint(cx, expr, op, cast_from, cast_to);
1075 declare_lint_pass!(Casts => [
1076 CAST_PRECISION_LOSS,
1078 CAST_POSSIBLE_TRUNCATION,
1084 FN_TO_NUMERIC_CAST_WITH_TRUNCATION,
1087 // Check if the given type is either `core::ffi::c_void` or
1088 // one of the platform specific `libc::<platform>::c_void` of libc.
// Used by the pointer-alignment check: casts involving `c_void` are
// exempt since the user inherently asserts the real pointee type.
1089 fn is_c_void(cx: &LateContext<'_, '_>, ty: Ty<'_>) -> bool {
1090 if let ty::Adt(adt, _) = ty.sty {
1091 let names = cx.get_def_path(adt.did);
1093 if names.is_empty() {
// Match on the crate root (`libc` or `core`) and the final segment
// `c_void`, so platform-specific module paths in between are accepted.
1096 if names[0] == "libc" || names[0] == "core" && *names.last().unwrap() == "c_void" {
1103 /// Returns the mantissa bits wide of a fp type.
1104 /// Will return 0 if the type is not a fp
// 23 / 52 are the *explicit* mantissa bits of IEEE-754 binary32 / binary64.
// Unresolved float inference variables default to the f64 width.
1105 fn fp_ty_mantissa_nbits(typ: Ty<'_>) -> u32 {
1107 ty::Float(FloatTy::F32) => 23,
1108 ty::Float(FloatTy::F64) | ty::Infer(InferTy::FloatVar(_)) => 52,
1113 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for Casts {
// Dispatches all cast lints for `expr as Ty` expressions. NOTE(review):
// this view of the file is elided; lines are missing within this body.
1114 fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
1115 if in_macro_or_desugar(expr.span) {
1118 if let ExprKind::Cast(ref ex, _) = expr.node {
1119 let (cast_from, cast_to) = (cx.tables.expr_ty(ex), cx.tables.expr_ty(expr));
1120 lint_fn_to_numeric_cast(cx, expr, ex, cast_from, cast_to);
// UNNECESSARY_CAST on integer literals cast to a float type that can
// represent them exactly: suggest a suffixed literal instead.
1121 if let ExprKind::Lit(ref lit) = ex.node {
1122 use syntax::ast::{LitIntType, LitKind};
1123 if let LitKind::Int(n, _) = lit.node {
1124 if cast_to.is_fp() {
// Number of significant bits in the literal's value.
1125 let from_nbits = 128 - n.leading_zeros();
1126 let to_nbits = fp_ty_mantissa_nbits(cast_to);
1127 if from_nbits != 0 && to_nbits != 0 && from_nbits <= to_nbits {
1132 &format!("casting integer literal to {} is unnecessary", cast_to),
1134 format!("{}_{}", n, cast_to),
1135 Applicability::MachineApplicable,
1142 LitKind::Int(_, LitIntType::Unsuffixed) | LitKind::FloatUnsuffixed(_) => {},
// UNNECESSARY_CAST: source and target types are identical.
1144 if cast_from.sty == cast_to.sty && !in_external_macro(cx.sess(), expr.span) {
1150 "casting to the same type is unnecessary (`{}` -> `{}`)",
// Numeric-to-numeric casts: precision loss / truncation / wrap /
// sign loss / lossless-cast checks, split by int-vs-float direction.
1158 if cast_from.is_numeric() && cast_to.is_numeric() && !in_external_macro(cx.sess(), expr.span) {
1159 match (cast_from.is_integral(), cast_to.is_integral()) {
1161 let from_nbits = int_ty_to_nbits(cast_from, cx.tcx);
1162 let to_nbits = if let ty::Float(FloatTy::F32) = cast_to.sty {
1167 if is_isize_or_usize(cast_from) || from_nbits >= to_nbits {
1168 span_precision_loss_lint(cx, expr, cast_from, to_nbits == 64);
1170 if from_nbits < to_nbits {
1171 span_lossless_lint(cx, expr, ex, cast_from, cast_to);
// Float-to-int casts may both truncate and lose the sign.
1177 CAST_POSSIBLE_TRUNCATION,
1179 &format!("casting {} to {} may truncate the value", cast_from, cast_to),
1181 if !cast_to.is_signed() {
1186 &format!("casting {} to {} may lose the sign of the value", cast_from, cast_to),
// Int-to-int casts get the full battery of checks.
1191 check_loss_of_sign(cx, expr, ex, cast_from, cast_to);
1192 check_truncation_and_wrapping(cx, expr, cast_from, cast_to);
1193 check_lossless(cx, expr, ex, cast_from, cast_to);
// f64 -> f32 narrows; f32 -> f64 is lossless.
1196 if let (&ty::Float(FloatTy::F64), &ty::Float(FloatTy::F32)) = (&cast_from.sty, &cast_to.sty) {
1199 CAST_POSSIBLE_TRUNCATION,
1201 "casting f64 to f32 may truncate the value",
1204 if let (&ty::Float(FloatTy::F32), &ty::Float(FloatTy::F64)) = (&cast_from.sty, &cast_to.sty) {
1205 span_lossless_lint(cx, expr, ex, cast_from, cast_to);
// CAST_PTR_ALIGNMENT: raw-pointer casts to a more strictly aligned
// pointee, exempting `c_void` on either side.
1212 if let ty::RawPtr(from_ptr_ty) = &cast_from.sty;
1213 if let ty::RawPtr(to_ptr_ty) = &cast_to.sty;
1214 if let Some(from_align) = cx.layout_of(from_ptr_ty.ty).ok().map(|a| a.align.abi);
1215 if let Some(to_align) = cx.layout_of(to_ptr_ty.ty).ok().map(|a| a.align.abi);
1216 if from_align < to_align;
1217 // with c_void, we inherently need to trust the user
1218 if !is_c_void(cx, from_ptr_ty.ty);
1224 &format!("casting from `{}` to a more-strictly-aligned pointer (`{}`)", cast_from, cast_to)
/// Lints casts of function pointers to numeric types: FN_TO_NUMERIC_CAST for
/// any non-`usize` target, upgraded to FN_TO_NUMERIC_CAST_WITH_TRUNCATION
/// when the target is narrower than a pointer. Both suggest `as usize`.
1232 fn lint_fn_to_numeric_cast(
1233 cx: &LateContext<'_, '_>,
1239 // We only want to check casts to `ty::Uint` or `ty::Int`
1241 ty::Uint(_) | ty::Int(..) => { /* continue on */ },
1244 match cast_from.sty {
1245 ty::FnDef(..) | ty::FnPtr(_) => {
1246 let mut applicability = Applicability::MachineApplicable;
1247 let from_snippet = snippet_with_applicability(cx, cast_expr.span, "x", &mut applicability);
1249 let to_nbits = int_ty_to_nbits(cast_to, cx.tcx);
// Narrower than the target's pointer width: the cast discards
// address bits, which deserves the stronger truncation lint.
1250 if to_nbits < cx.tcx.data_layout.pointer_size.bits() {
1253 FN_TO_NUMERIC_CAST_WITH_TRUNCATION,
1256 "casting function pointer `{}` to `{}`, which truncates the value",
1257 from_snippet, cast_to
1260 format!("{} as usize", from_snippet),
// Wide enough, but any target other than `usize` is still linted.
1263 } else if cast_to.sty != ty::Uint(UintTy::Usize) {
1268 &format!("casting function pointer `{}` to `{}`", from_snippet, cast_to),
1270 format!("{} as usize", from_snippet),
1279 declare_clippy_lint! {
1280 /// **What it does:** Checks for types used in structs, parameters and `let`
1281 /// declarations above a certain complexity threshold.
1283 /// **Why is this bad?** Too complex types make the code less readable. Consider
1284 /// using a `type` definition to simplify them.
1286 /// **Known problems:** None.
1291 /// inner: Rc<Vec<Vec<Box<(u32, u32, u32, u32)>>>>,
1294 pub TYPE_COMPLEXITY,
1296 "usage of very complex types that might be better factored into `type` definitions"
// Lint pass state: holds the configurable complexity threshold above which
// TYPE_COMPLEXITY fires (set from the `type-complexity-threshold` option).
1299 pub struct TypeComplexity {
1303 impl TypeComplexity {
// Constructs the pass with the user-configured score threshold.
1304 pub fn new(threshold: u64) -> Self {
1309 impl_lint_pass!(TypeComplexity => [TYPE_COMPLEXITY]);
1311 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeComplexity {
// Entry points that route every explicitly written type to `check_type`
// (and whole signatures to `check_fndecl`).
1314 cx: &LateContext<'a, 'tcx>,
1321 self.check_fndecl(cx, decl);
1324 fn check_struct_field(&mut self, cx: &LateContext<'a, 'tcx>, field: &'tcx hir::StructField) {
1325 // enum variants are also struct fields now
1326 self.check_type(cx, &field.ty);
1329 fn check_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx Item) {
1331 ItemKind::Static(ref ty, _, _) | ItemKind::Const(ref ty, _) => self.check_type(cx, ty),
1332 // functions, enums, structs, impls and traits are covered
1337 fn check_trait_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx TraitItem) {
1339 TraitItemKind::Const(ref ty, _) | TraitItemKind::Type(_, Some(ref ty)) => self.check_type(cx, ty),
1340 TraitItemKind::Method(MethodSig { ref decl, .. }, TraitMethod::Required(_)) => self.check_fndecl(cx, decl),
1341 // methods with default impl are covered by check_fn
1346 fn check_impl_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx ImplItem) {
1348 ImplItemKind::Const(ref ty, _) | ImplItemKind::Type(ref ty) => self.check_type(cx, ty),
1349 // methods are covered by check_fn
1354 fn check_local(&mut self, cx: &LateContext<'a, 'tcx>, local: &'tcx Local) {
1355 if let Some(ref ty) = local.ty {
1356 self.check_type(cx, ty);
1361 impl<'a, 'tcx> TypeComplexity {
// Scores every argument type and the explicit return type of a signature.
1362 fn check_fndecl(&self, cx: &LateContext<'a, 'tcx>, decl: &'tcx FnDecl) {
1363 for arg in &decl.inputs {
1364 self.check_type(cx, arg);
1366 if let Return(ref ty) = decl.output {
1367 self.check_type(cx, ty);
// Walks one type with `TypeComplexityVisitor` and lints if its score
// exceeds the configured threshold.
1371 fn check_type(&self, cx: &LateContext<'_, '_>, ty: &hir::Ty) {
// Macro-generated types are not the user's fault; skip them.
1372 if in_macro_or_desugar(ty.span) {
1376 let mut visitor = TypeComplexityVisitor { score: 0, nest: 1 };
1377 visitor.visit_ty(ty);
1381 if score > self.threshold {
1386 "very complex type used. Consider factoring parts into `type` definitions",
1392 /// Walks a type and assigns a complexity score to it.
1393 struct TypeComplexityVisitor {
1394 /// total complexity score of the type
1396 /// current nesting level
// Each type node adds `add_score` (scaled by the current nesting level for
// most kinds) and bumps the nesting level by `sub_nest` while its children
// are visited, so deeply nested components cost progressively more.
1400 impl<'tcx> Visitor<'tcx> for TypeComplexityVisitor {
1401 fn visit_ty(&mut self, ty: &'tcx hir::Ty) {
1402 let (add_score, sub_nest) = match ty.node {
1403 // _, &x and *x have only small overhead; don't mess with nesting level
1404 TyKind::Infer | TyKind::Ptr(..) | TyKind::Rptr(..) => (1, 0),
1406 // the "normal" components of a type: named types, arrays/tuples
1407 TyKind::Path(..) | TyKind::Slice(..) | TyKind::Tup(..) | TyKind::Array(..) => (10 * self.nest, 1),
1409 // function types bring a lot of overhead
1410 TyKind::BareFn(ref bare) if bare.abi == Abi::Rust => (50 * self.nest, 1),
// Trait objects are scored differently depending on whether any bound
// introduces lifetime parameters (HRTB-style bounds count as "complex").
1412 TyKind::TraitObject(ref param_bounds, _) => {
1413 let has_lifetime_parameters = param_bounds.iter().any(|bound| {
1414 bound.bound_generic_params.iter().any(|gen| match gen.kind {
1415 GenericParamKind::Lifetime { .. } => true,
1419 if has_lifetime_parameters {
1420 // complex trait bounds like A<'a, 'b>
1423 // simple trait bounds like A + B
// Accumulate the score, raise the nesting level for the recursive walk of
// child types, then restore it on the way back out.
1430 self.score += add_score;
1431 self.nest += sub_nest;
1433 self.nest -= sub_nest;
// No nested-body visiting needed: this visitor only ever walks a single
// type tree.
1435 fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
1436 NestedVisitorMap::None
1440 declare_clippy_lint! {
1441 /// **What it does:** Checks for expressions where a character literal is cast
1442 /// to `u8` and suggests using a byte literal instead.
1444 /// **Why is this bad?** In general, casting values to smaller types is
1445 /// error-prone and should be avoided where possible. In the particular case of
1446 /// converting a character literal to u8, it is easy to avoid by just using a
1447 /// byte literal instead. As an added bonus, `b'a'` is even slightly shorter
1448 /// than `'a' as u8`.
1450 /// **Known problems:** None.
1457 /// A better version, using the byte literal:
1464 "casting a character literal to u8"
// Stateless pass: declare_lint_pass! suffices (no configuration needed).
1467 declare_lint_pass!(CharLitAsU8 => [CHAR_LIT_AS_U8]);
// Matches the exact shape `<char literal> as <something typed u8>` and, when
// found outside macros/desugarings, suggests the byte-literal form `b'…'`.
1469 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for CharLitAsU8 {
1470 fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
1471 use syntax::ast::LitKind;
// Three nested checks: the expression is a cast, its operand is a literal,
// and that literal is a `char`.
1473 if let ExprKind::Cast(ref e, _) = expr.node {
1474 if let ExprKind::Lit(ref l) = e.node {
1475 if let LitKind::Char(_) = l.node {
// The cast target is checked via the inferred type of the whole cast
// expression (must be exactly `u8`), not the written type syntax.
1476 if ty::Uint(UintTy::U8) == cx.tables.expr_ty(expr).sty && !in_macro_or_desugar(expr.span) {
1477 let msg = "casting character literal to u8. `char`s \
1478 are 4 bytes wide in rust, so casting to u8 \
// Help text reuses the user's literal snippet, prefixed with `b` to form the
// suggested byte literal; falls back to `'x'` if the snippet is unavailable.
1481 "Consider using a byte literal instead:\nb{}",
1482 snippet(cx, e.span, "'x'")
1484 span_help_and_lint(cx, CHAR_LIT_AS_U8, expr.span, msg, &help);
1492 declare_clippy_lint! {
1493 /// **What it does:** Checks for comparisons where one side of the relation is
1494 /// either the minimum or maximum value for its type and warns if it involves a
1495 /// case that is always true or always false. Only integer and boolean types are
1498 /// **Why is this bad?** An expression like `min <= x` may misleadingly imply
1499 /// that it is possible for `x` to be less than the minimum. Expressions like
1500 /// `max < x` are probably mistakes.
1502 /// **Known problems:** For `usize` the size of the current compile target will
1503 /// be assumed (e.g., 64 bits on 64 bit systems). This means code that uses such
1504 /// a comparison to detect target pointer width will trigger this lint. One can
1505 /// use `mem::size_of` and compare its value or conditional compilation
1507 /// like `#[cfg(target_pointer_width = "64")] ..` instead.
1512 /// let vec: Vec<isize> = vec![];
1513 /// if vec.len() <= 0 {}
1514 /// if 100 > std::i32::MAX {}
1516 pub ABSURD_EXTREME_COMPARISONS,
1518 "a comparison with a maximum or minimum value that is always true or false"
1521 declare_lint_pass!(AbsurdExtremeComparisons => [ABSURD_EXTREME_COMPARISONS]);
// The offending extreme operand: which extreme it is (Minimum/Maximum —
// the `which` field is on a line missing from this excerpt) plus the expr.
1528 struct ExtremeExpr<'a> {
// Verdict of the analysis; `AlwaysFalse`/`AlwaysTrue` variants appear on
// lines missing from this excerpt.
1533 enum AbsurdComparisonResult {
1536 InequalityImpossible,
// Returns true when `expr` is a cast whose source and target differ in
// whether they are pointer-sized (`isize`/`usize`) — such comparisons depend
// on the compile target's pointer width and are treated as unanalyzable.
1539 fn is_cast_between_fixed_and_target<'a, 'tcx>(cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) -> bool {
1540 if let ExprKind::Cast(ref cast_exp, _) = expr.node {
1541 let precast_ty = cx.tables.expr_ty(cast_exp);
1542 let cast_ty = cx.tables.expr_ty(expr);
// XOR on "is pointer-sized": exactly one side being isize/usize means the
// cast crosses the fixed-size/target-size boundary.
1544 return is_isize_or_usize(precast_ty) != is_isize_or_usize(cast_ty);
// Decides whether a binary comparison `lhs op rhs` is absurd: returns the
// extreme operand together with the verdict (always true, always false, or
// "inequality impossible", i.e. only the == case can occur), or None when
// the comparison is fine or unanalyzable.
1550 fn detect_absurd_comparison<'a, 'tcx>(
1551 cx: &LateContext<'a, 'tcx>,
1555 ) -> Option<(ExtremeExpr<'tcx>, AbsurdComparisonResult)> {
1556 use crate::types::AbsurdComparisonResult::*;
1557 use crate::types::ExtremeType::*;
1558 use crate::utils::comparisons::*;
1560 // absurd comparison only makes sense on primitive types
1561 // primitive types don't implement comparison operators with each other
1562 if cx.tables.expr_ty(lhs) != cx.tables.expr_ty(rhs) {
1566 // comparisons between fix sized types and target sized types are considered unanalyzable
1567 if is_cast_between_fixed_and_target(cx, lhs) || is_cast_between_fixed_and_target(cx, rhs) {
// Normalize so only `<` and `<=` (plus Eq/Ne) remain; `>`/`>=` are handled
// by swapping the operands inside `normalize_comparison`.
1571 let normalized = normalize_comparison(op, lhs, rhs);
1572 let (rel, normalized_lhs, normalized_rhs) = if let Some(val) = normalized {
// Check each normalized side for being a type extreme (min/max constant).
1578 let lx = detect_extreme_expr(cx, normalized_lhs);
1579 let rx = detect_extreme_expr(cx, normalized_rhs);
// Strict `<`: max < x and x < min can never hold.
1584 (Some(l @ ExtremeExpr { which: Maximum, .. }), _) => (l, AlwaysFalse), // max < x
1585 (_, Some(r @ ExtremeExpr { which: Minimum, .. })) => (r, AlwaysFalse), // x < min
// Non-strict `<=`: min <= x / x <= max always hold; max <= x / x <= min can
// only hold with equality, so the "not equal" case never occurs.
1591 (Some(l @ ExtremeExpr { which: Minimum, .. }), _) => (l, AlwaysTrue), // min <= x
1592 (Some(l @ ExtremeExpr { which: Maximum, .. }), _) => (l, InequalityImpossible), // max <= x
1593 (_, Some(r @ ExtremeExpr { which: Minimum, .. })) => (r, InequalityImpossible), // x <= min
1594 (_, Some(r @ ExtremeExpr { which: Maximum, .. })) => (r, AlwaysTrue), // x <= max
// Equality/inequality with an extreme is not inherently absurd.
1598 Rel::Ne | Rel::Eq => return None,
// Checks whether `expr` is a compile-time constant equal to the minimum or
// maximum value of its (bool / integer) type; returns None for anything
// non-constant or non-extreme.
1602 fn detect_extreme_expr<'a, 'tcx>(cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) -> Option<ExtremeExpr<'tcx>> {
1603 use crate::types::ExtremeType::*;
1605 let ty = cx.tables.expr_ty(expr);
// `?` bails out when the expression is not a known constant.
1607 let cv = constant(cx, cx.tables, expr)?.0;
1609 let which = match (&ty.sty, cv) {
// Minimums: false for bool, 0 for unsigned ints, and the sign-extended
// i<N>::MIN for signed ints (computed via shifting i128::MIN down to N bits).
1610 (&ty::Bool, Constant::Bool(false)) | (&ty::Uint(_), Constant::Int(0)) => Minimum,
1611 (&ty::Int(ity), Constant::Int(i))
1612 if i == unsext(cx.tcx, i128::min_value() >> (128 - int_bits(cx.tcx, ity)), ity) =>
// Maximums: true for bool, i<N>::MAX for signed, u<N>::MAX (via clip) for
// unsigned.
1617 (&ty::Bool, Constant::Bool(true)) => Maximum,
1618 (&ty::Int(ity), Constant::Int(i))
1619 if i == unsext(cx.tcx, i128::max_value() >> (128 - int_bits(cx.tcx, ity)), ity) =>
1623 (&ty::Uint(uty), Constant::Int(i)) if clip(cx.tcx, u128::max_value(), uty) == i => Maximum,
1627 Some(ExtremeExpr { which, expr })
// Emits ABSURD_EXTREME_COMPARISONS: runs detect_absurd_comparison on every
// binary expression and builds a tailored message/help from the verdict.
1630 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for AbsurdExtremeComparisons {
1631 fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
1632 use crate::types::AbsurdComparisonResult::*;
1633 use crate::types::ExtremeType::*;
1635 if let ExprKind::Binary(ref cmp, ref lhs, ref rhs) = expr.node {
1636 if let Some((culprit, result)) = detect_absurd_comparison(cx, cmp.node, lhs, rhs) {
// Skip macro-generated comparisons; the user didn't write them.
1637 if !in_macro_or_desugar(expr.span) {
1638 let msg = "this comparison involving the minimum or maximum element for this \
1639 type contains a case that is always true or always false";
// Verdict-specific conclusion; InequalityImpossible suggests rewriting as an
// equality test using the user's own operand snippets.
1641 let conclusion = match result {
1642 AlwaysFalse => "this comparison is always false".to_owned(),
1643 AlwaysTrue => "this comparison is always true".to_owned(),
1644 InequalityImpossible => format!(
1645 "the case where the two sides are not equal never occurs, consider using {} == {} \
1647 snippet(cx, lhs.span, "lhs"),
1648 snippet(cx, rhs.span, "rhs")
// Help message names the culprit operand and which extreme it is.
1653 "because {} is the {} value for this type, {}",
1654 snippet(cx, culprit.expr.span, "x"),
1655 match culprit.which {
1656 Minimum => "minimum",
1657 Maximum => "maximum",
1662 span_help_and_lint(cx, ABSURD_EXTREME_COMPARISONS, expr.span, msg, &help);
1669 declare_clippy_lint! {
1670 /// **What it does:** Checks for comparisons where the relation is always either
1671 /// true or false, but where one side has been upcast so that the comparison is
1672 /// necessary. Only integer types are checked.
1674 /// **Why is this bad?** An expression like `let x : u8 = ...; (x as u32) > 300`
1675 /// will mistakenly imply that it is possible for `x` to be outside the range of
1678 /// **Known problems:**
1679 /// https://github.com/rust-lang/rust-clippy/issues/886
1683 /// let x : u8 = ...; (x as u32) > 300
1685 pub INVALID_UPCAST_COMPARISONS,
1687 "a comparison involving an upcast which is always true or false"
1690 declare_lint_pass!(InvalidUpcastComparisons => [INVALID_UPCAST_COMPARISONS]);
// FullInt: a signed-or-unsigned 128-bit integer with a total order across
// both variants. NOTE(review): the enum definition itself (variants S(i128)
// and U(u128), per the usage below) is on lines missing from this excerpt.
1692 #[derive(Copy, Clone, Debug, Eq)]
// The cast is deliberate: `u` is known (via the branches around this line,
// partially missing here) to fit the i128 range when compared numerically.
1699 #[allow(clippy::cast_sign_loss)]
// Compares a signed value against an unsigned value numerically; used by
// PartialOrd below (u > i128::MAX forces Less, negatives force Less, etc. —
// the missing branches presumably follow that scheme; confirm in full file).
1700 fn cmp_s_u(s: i128, u: u128) -> Ordering {
1703 } else if u > (i128::max_value() as u128) {
// Equality is defined via the total comparison so that S(x) == U(y) holds
// exactly when they denote the same mathematical value.
1711 impl PartialEq for FullInt {
1712 fn eq(&self, other: &Self) -> bool {
1713 self.partial_cmp(other).expect("partial_cmp only returns Some(_)") == Ordering::Equal
1717 impl PartialOrd for FullInt {
// Always Some: same-sign pairs use the native Ord, mixed-sign pairs go
// through cmp_s_u (reversed when the unsigned value is on the left).
1718 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
1719 Some(match (self, other) {
1720 (&FullInt::S(s), &FullInt::S(o)) => s.cmp(&o),
1721 (&FullInt::U(s), &FullInt::U(o)) => s.cmp(&o),
1722 (&FullInt::S(s), &FullInt::U(o)) => Self::cmp_s_u(s, o),
1723 (&FullInt::U(s), &FullInt::S(o)) => Self::cmp_s_u(o, s).reverse(),
// Ord delegates to partial_cmp, which never returns None (see above), making
// the expect safe.
1727 impl Ord for FullInt {
1728 fn cmp(&self, other: &Self) -> Ordering {
1729 self.partial_cmp(other)
1730 .expect("partial_cmp for FullInt can never return None")
// If `expr` is an integer cast, returns the (min, max) value range of the
// *pre-cast* type as FullInts — i.e. the bounds the casted value is known to
// lie within. Returns None for non-casts, non-integer types, or casts that
// don't change the size (same-size casts can wrap, invalidating the bounds).
1734 fn numeric_cast_precast_bounds<'a>(cx: &LateContext<'_, '_>, expr: &'a Expr) -> Option<(FullInt, FullInt)> {
1737 if let ExprKind::Cast(ref cast_exp, _) = expr.node {
1738 let pre_cast_ty = cx.tables.expr_ty(cast_exp);
1739 let cast_ty = cx.tables.expr_ty(expr);
1740 // if it's a cast from i32 to u32 wrapping will invalidate all these checks
// Same layout size => potential wrapping; bail out (the early-return/None
// path around this check is on lines missing from this excerpt).
1741 if cx.layout_of(pre_cast_ty).ok().map(|l| l.size) == cx.layout_of(cast_ty).ok().map(|l| l.size) {
// Signed source types: each width maps to its (MIN, MAX) pair widened to i128.
1744 match pre_cast_ty.sty {
1745 ty::Int(int_ty) => Some(match int_ty {
1747 FullInt::S(i128::from(i8::min_value())),
1748 FullInt::S(i128::from(i8::max_value())),
1751 FullInt::S(i128::from(i16::min_value())),
1752 FullInt::S(i128::from(i16::max_value())),
1755 FullInt::S(i128::from(i32::min_value())),
1756 FullInt::S(i128::from(i32::max_value())),
1759 FullInt::S(i128::from(i64::min_value())),
1760 FullInt::S(i128::from(i64::max_value())),
1762 IntTy::I128 => (FullInt::S(i128::min_value()), FullInt::S(i128::max_value())),
// isize bounds use the host-independent compile-target width via `as i128`.
1764 FullInt::S(isize::min_value() as i128),
1765 FullInt::S(isize::max_value() as i128),
// Unsigned source types: symmetric table widened to u128.
1768 ty::Uint(uint_ty) => Some(match uint_ty {
1770 FullInt::U(u128::from(u8::min_value())),
1771 FullInt::U(u128::from(u8::max_value())),
1774 FullInt::U(u128::from(u16::min_value())),
1775 FullInt::U(u128::from(u16::max_value())),
1778 FullInt::U(u128::from(u32::min_value())),
1779 FullInt::U(u128::from(u32::max_value())),
1782 FullInt::U(u128::from(u64::min_value())),
1783 FullInt::U(u128::from(u64::max_value())),
1785 UintTy::U128 => (FullInt::U(u128::min_value()), FullInt::U(u128::max_value())),
1787 FullInt::U(usize::min_value() as u128),
1788 FullInt::U(usize::max_value() as u128),
// Evaluates `expr` as a constant integer and wraps it into a FullInt,
// sign-extending via `sext` for signed types; None for non-constant or
// non-integer expressions.
1798 fn node_as_const_fullint<'a, 'tcx>(cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) -> Option<FullInt> {
1799 let val = constant(cx, cx.tables, expr)?.0;
1800 if let Constant::Int(const_int) = val {
1801 match cx.tables.expr_ty(expr).sty {
1802 ty::Int(ity) => Some(FullInt::S(sext(cx.tcx, const_int, ity))),
1803 ty::Uint(_) => Some(FullInt::U(const_int)),
// Emits INVALID_UPCAST_COMPARISONS for the cast operand, naming the pre-cast
// expression and whether the comparison is constantly true or false.
1811 fn err_upcast_comparison(cx: &LateContext<'_, '_>, span: Span, expr: &Expr, always: bool) {
1812 if let ExprKind::Cast(ref cast_val, _) = expr.node {
1815 INVALID_UPCAST_COMPARISONS,
1818 "because of the numeric bounds on `{}` prior to casting, this expression is always {}",
1819 snippet(cx, cast_val.span, "the expression"),
1820 if always { "true" } else { "false" },
// Given the (min, max) bounds of the upcast lhs and a constant rhs, decides
// whether the relation can ever vary; if the constant lies entirely outside
// what the bounds allow, the comparison is constant and an error is emitted.
1826 fn upcast_comparison_bounds_err<'a, 'tcx>(
1827 cx: &LateContext<'a, 'tcx>,
1829 rel: comparisons::Rel,
1830 lhs_bounds: Option<(FullInt, FullInt)>,
1835 use crate::utils::comparisons::*;
1837 if let Some((lb, ub)) = lhs_bounds {
1838 if let Some(norm_rhs_val) = node_as_const_fullint(cx, rhs) {
// Eq/Ne: a constant outside [lb, ub] makes `==` always false / `!=` always
// true (hence `always = (rel == Ne)`).
1839 if rel == Rel::Eq || rel == Rel::Ne {
1840 if norm_rhs_val < lb || norm_rhs_val > ub {
1841 err_upcast_comparison(cx, span, lhs, rel == Rel::Ne);
// Ordering relations: the per-relation conditions live on lines missing from
// this excerpt; the surviving structure shows an "always true" branch first
// and an "always false" branch second, with Eq/Ne unreachable because they
// were handled above.
1843 } else if match rel {
1858 Rel::Eq | Rel::Ne => unreachable!(),
1860 err_upcast_comparison(cx, span, lhs, true)
1861 } else if match rel {
1876 Rel::Eq | Rel::Ne => unreachable!(),
1878 err_upcast_comparison(cx, span, lhs, false)
// Entry point: normalizes the comparison, computes pre-cast bounds for each
// side, then checks both orientations (lhs-upcast vs rhs-upcast; the final
// bool flags which side is which for message purposes).
1884 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for InvalidUpcastComparisons {
1885 fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
1886 if let ExprKind::Binary(ref cmp, ref lhs, ref rhs) = expr.node {
1887 let normalized = comparisons::normalize_comparison(cmp.node, lhs, rhs);
1888 let (rel, normalized_lhs, normalized_rhs) = if let Some(val) = normalized {
1894 let lhs_bounds = numeric_cast_precast_bounds(cx, normalized_lhs);
1895 let rhs_bounds = numeric_cast_precast_bounds(cx, normalized_rhs);
1897 upcast_comparison_bounds_err(cx, expr.span, rel, lhs_bounds, normalized_lhs, normalized_rhs, false);
1898 upcast_comparison_bounds_err(cx, expr.span, rel, rhs_bounds, normalized_rhs, normalized_lhs, true);
1903 declare_clippy_lint! {
1904 /// **What it does:** Checks for public `impl` or `fn` missing generalization
1905 /// over different hashers and implicitly defaulting to the default hashing
1906 /// algorithm (SipHash).
1908 /// **Why is this bad?** `HashMap` or `HashSet` with custom hashers cannot be
1911 /// **Known problems:** Suggestions for replacing constructors can contain
1912 /// false-positives. Also applying suggestions can require modification of other
1913 /// pieces of code, possibly including external crates.
1917 /// # use std::collections::HashMap;
1918 /// # use std::hash::Hash;
1919 /// # trait Serialize {};
1920 /// impl<K: Hash + Eq, V> Serialize for HashMap<K, V> { }
1922 /// pub fn foo(map: &mut HashMap<i32, i32>) { }
1924 pub IMPLICIT_HASHER,
1926 "missing generalization over different hashers"
// Stateless pass declaration for IMPLICIT_HASHER.
1929 declare_lint_pass!(ImplicitHasher => [IMPLICIT_HASHER]);
// Finds exported impls and fns whose signatures mention HashMap/HashSet
// without a hasher type parameter, and suggests adding `S: BuildHasher`
// (plus rewriting default-hasher constructors found in the bodies).
1931 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for ImplicitHasher {
1932 #[allow(clippy::cast_possible_truncation, clippy::too_many_lines)]
1933 fn check_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx Item) {
1934 use syntax_pos::BytePos;
// Shared suggestion builder: splices a new `S: ::std::hash::BuildHasher`
// type parameter into the generics snippet and, when the constructor
// visitor found `new`/`with_capacity` calls, attaches those rewrites too.
1936 fn suggestion<'a, 'tcx>(
1937 cx: &LateContext<'a, 'tcx>,
1938 db: &mut DiagnosticBuilder<'_>,
1939 generics_span: Span,
1940 generics_suggestion_span: Span,
1941 target: &ImplicitHasherType<'_>,
1942 vis: ImplicitHasherConstructorVisitor<'_, '_, '_>,
1944 let generics_snip = snippet(cx, generics_span, "");
// Strip the surrounding `<` `>` from an existing generics list so the new
// parameter can be appended; empty means the item had no generics.
1946 let generics_snip = if generics_snip.is_empty() {
1949 &generics_snip[1..generics_snip.len() - 1]
1954 "consider adding a type parameter".to_string(),
1957 generics_suggestion_span,
1959 "<{}{}S: ::std::hash::BuildHasher{}>",
1961 if generics_snip.is_empty() { "" } else { ", " },
1962 if vis.suggestions.is_empty() {
1965 // request users to add `Default` bound so that generic constructors can be used
1972 format!("{}<{}, S>", target.type_name(), target.type_arguments(),),
1977 if !vis.suggestions.is_empty() {
1978 multispan_sugg(db, "...and use generic constructor".into(), vis.suggestions);
// Only exported items matter: private items can be changed freely by the
// crate author, so no lint.
1982 if !cx.access_levels.is_exported(item.hir_id) {
// Case 1: `impl … for HashMap<…>`-style items. Visit the self type for
// hasher-less targets, then scan the impl's items for default constructors.
1987 ItemKind::Impl(_, _, _, ref generics, _, ref ty, ref items) => {
1988 let mut vis = ImplicitHasherTypeVisitor::new(cx);
1991 for target in &vis.found {
// Skip targets produced by a different macro context than the item itself.
1992 if differing_macro_contexts(item.span, target.span()) {
// Dummy generics span (item without generics): synthesize an insertion
// point just after the `impl` keyword.
1996 let generics_suggestion_span = generics.span.substitute_dummy({
1997 let pos = snippet_opt(cx, item.span.until(target.span()))
1998 .and_then(|snip| Some(item.span.lo() + BytePos(snip.find("impl")? as u32 + 4)));
1999 if let Some(pos) = pos {
2000 Span::new(pos, pos, item.span.data().ctxt)
2006 let mut ctr_vis = ImplicitHasherConstructorVisitor::new(cx, target);
2007 for item in items.iter().map(|item| cx.tcx.hir().impl_item(item.id)) {
2008 ctr_vis.visit_impl_item(item);
2016 "impl for `{}` should be generalized over different hashers",
2020 suggestion(cx, db, generics.span, generics_suggestion_span, target, ctr_vis);
// Case 2: `fn` items. Visit each input type; for each hasher-less target,
// synthesize the insertion point just before the parameter list and scan
// the body for default constructors.
2025 ItemKind::Fn(ref decl, .., ref generics, body_id) => {
2026 let body = cx.tcx.hir().body(body_id);
2028 for ty in &decl.inputs {
2029 let mut vis = ImplicitHasherTypeVisitor::new(cx);
2032 for target in &vis.found {
2033 let generics_suggestion_span = generics.span.substitute_dummy({
2034 let pos = snippet_opt(cx, item.span.until(body.arguments[0].pat.span))
2036 let i = snip.find("fn")?;
// Insertion point: the `(` that follows the `fn` keyword.
2037 Some(item.span.lo() + BytePos((i + (&snip[i..]).find('(')?) as u32))
2039 .expect("failed to create span for type parameters");
2040 Span::new(pos, pos, item.span.data().ctxt)
2043 let mut ctr_vis = ImplicitHasherConstructorVisitor::new(cx, target);
2044 ctr_vis.visit_body(body);
2051 "parameter of type `{}` should be generalized over different hashers",
2055 suggestion(cx, db, generics.span, generics_suggestion_span, target, ctr_vis);
// A hasher-less target type found in a signature: the span where it was
// written, its semantic type, and snippets of its type arguments (K/V for
// maps, T for sets) for building suggestion text.
2066 enum ImplicitHasherType<'tcx> {
2067 HashMap(Span, Ty<'tcx>, Cow<'static, str>, Cow<'static, str>),
2068 HashSet(Span, Ty<'tcx>, Cow<'static, str>),
2071 impl<'tcx> ImplicitHasherType<'tcx> {
2072 /// Checks that `ty` is a target type without a BuildHasher.
2073 fn new<'a>(cx: &LateContext<'a, 'tcx>, hir_ty: &hir::Ty) -> Option<Self> {
2074 if let TyKind::Path(QPath::Resolved(None, ref path)) = hir_ty.node {
// Collect only the type arguments (lifetimes etc. are filtered out).
2075 let params: Vec<_> = path
2083 .filter_map(|arg| match arg {
2084 GenericArg::Type(ty) => Some(ty),
2088 let params_len = params.len();
2090 let ty = hir_ty_to_ty(cx.tcx, hir_ty);
// Exactly 2 args for HashMap / 1 for HashSet means the hasher parameter was
// left implicit — a third/second arg would be an explicit hasher.
2092 if match_path(path, &*paths::HASHMAP) && params_len == 2 {
2093 Some(ImplicitHasherType::HashMap(
2096 snippet(cx, params[0].span, "K"),
2097 snippet(cx, params[1].span, "V"),
2099 } else if match_path(path, &*paths::HASHSET) && params_len == 1 {
2100 Some(ImplicitHasherType::HashSet(
2103 snippet(cx, params[0].span, "T"),
// Accessors below feed the suggestion text and the constructor visitor.
2113 fn type_name(&self) -> &'static str {
2115 ImplicitHasherType::HashMap(..) => "HashMap",
2116 ImplicitHasherType::HashSet(..) => "HashSet",
2120 fn type_arguments(&self) -> String {
2122 ImplicitHasherType::HashMap(.., ref k, ref v) => format!("{}, {}", k, v),
2123 ImplicitHasherType::HashSet(.., ref t) => format!("{}", t),
2127 fn ty(&self) -> Ty<'tcx> {
2129 ImplicitHasherType::HashMap(_, ty, ..) | ImplicitHasherType::HashSet(_, ty, ..) => ty,
2133 fn span(&self) -> Span {
2135 ImplicitHasherType::HashMap(span, ..) | ImplicitHasherType::HashSet(span, ..) => span,
// Walks a type tree collecting every hasher-less HashMap/HashSet occurrence
// (including ones nested inside other types).
2140 struct ImplicitHasherTypeVisitor<'a, 'tcx: 'a> {
2141 cx: &'a LateContext<'a, 'tcx>,
2142 found: Vec<ImplicitHasherType<'tcx>>,
2145 impl<'a, 'tcx: 'a> ImplicitHasherTypeVisitor<'a, 'tcx> {
2146 fn new(cx: &'a LateContext<'a, 'tcx>) -> Self {
2147 Self { cx, found: vec![] }
2151 impl<'a, 'tcx: 'a> Visitor<'tcx> for ImplicitHasherTypeVisitor<'a, 'tcx> {
// Record a match at this node, then (via the walk on a line missing from
// this excerpt) continue into child types to catch nested occurrences.
2152 fn visit_ty(&mut self, t: &'tcx hir::Ty) {
2153 if let Some(target) = ImplicitHasherType::new(self.cx, t) {
2154 self.found.push(target);
// Single-tree walk only; no nested bodies visited.
2160 fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
2161 NestedVisitorMap::None
2165 /// Looks for default-hasher-dependent constructors like `HashMap::new`.
2166 struct ImplicitHasherConstructorVisitor<'a, 'b, 'tcx: 'a + 'b> {
2167 cx: &'a LateContext<'a, 'tcx>,
// Typeck tables of the body currently being walked (swapped on visit_body).
2168 body: &'a TypeckTables<'tcx>,
// The signature target whose type the constructor calls must match.
2169 target: &'b ImplicitHasherType<'tcx>,
// Span -> replacement text, ordered for stable multispan suggestions.
2170 suggestions: BTreeMap<Span, String>,
2173 impl<'a, 'b, 'tcx: 'a + 'b> ImplicitHasherConstructorVisitor<'a, 'b, 'tcx> {
2174 fn new(cx: &'a LateContext<'a, 'tcx>, target: &'b ImplicitHasherType<'tcx>) -> Self {
2179 suggestions: BTreeMap::new(),
2184 impl<'a, 'b, 'tcx: 'a + 'b> Visitor<'tcx> for ImplicitHasherConstructorVisitor<'a, 'b, 'tcx> {
// Save/restore the typeck tables around each nested body so expr_ty below
// always consults the right body's tables.
2185 fn visit_body(&mut self, body: &'tcx Body) {
2186 let prev_body = self.body;
2187 self.body = self.cx.tcx.body_tables(body.id());
2188 walk_body(self, body);
2189 self.body = prev_body;
// Match calls of the shape `HashMap::new(...)` / `HashSet::with_capacity(...)`
// whose result type equals the target type, and record hasher-generic
// replacements (`default()` / `with_capacity_and_hasher(.., Default::default())`).
2192 fn visit_expr(&mut self, e: &'tcx Expr) {
2194 if let ExprKind::Call(ref fun, ref args) = e.node;
2195 if let ExprKind::Path(QPath::TypeRelative(ref ty, ref method)) = fun.node;
2196 if let TyKind::Path(QPath::Resolved(None, ref ty_path)) = ty.node;
// Only rewrite constructors producing exactly the flagged target type.
2198 if !same_tys(self.cx, self.target.ty(), self.body.expr_ty(e)) {
2202 if match_path(ty_path, &*paths::HASHMAP) {
2203 if method.ident.name == *sym::new {
2205 .insert(e.span, "HashMap::default()".to_string());
2206 } else if method.ident.name == *sym::with_capacity {
2207 self.suggestions.insert(
2210 "HashMap::with_capacity_and_hasher({}, Default::default())",
2211 snippet(self.cx, args[0].span, "capacity"),
2215 } else if match_path(ty_path, &*paths::HASHSET) {
2216 if method.ident.name == *sym::new {
2218 .insert(e.span, "HashSet::default()".to_string());
2219 } else if method.ident.name == *sym::with_capacity {
2220 self.suggestions.insert(
2223 "HashSet::with_capacity_and_hasher({}, Default::default())",
2224 snippet(self.cx, args[0].span, "capacity"),
// Bodies must be visited (constructors live inside method bodies), so the
// HIR map is needed here, unlike the type-only visitors above.
2235 fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
2236 NestedVisitorMap::OnlyBodies(&self.cx.tcx.hir())
2240 declare_clippy_lint! {
2241 /// **What it does:** Checks for casts of `&T` to `&mut T` anywhere in the code.
2243 /// **Why is this bad?** It’s basically guaranteed to be undefined behaviour.
2244 /// `UnsafeCell` is the only way to obtain aliasable data that is considered
2247 /// **Known problems:** None.
2253 /// *(r as *const _ as *mut _) += 1;
2258 /// Instead consider using interior mutability types.
2261 /// use std::cell::UnsafeCell;
2263 /// fn x(r: &UnsafeCell<i32>) {
2269 pub CAST_REF_TO_MUT,
2271 "a cast of reference to a mutable pointer"
// Stateless pass declaration for CAST_REF_TO_MUT.
2274 declare_lint_pass!(RefToMut => [CAST_REF_TO_MUT]);
2276 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for RefToMut {
2277 fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
2279 if let ExprKind::Unary(UnOp::UnDeref, e) = &expr.node;
2280 if let ExprKind::Cast(e, t) = &e.node;
2281 if let TyKind::Ptr(MutTy { mutbl: Mutability::MutMutable, .. }) = t.node;
2282 if let ExprKind::Cast(e, t) = &e.node;
2283 if let TyKind::Ptr(MutTy { mutbl: Mutability::MutImmutable, .. }) = t.node;
2284 if let ty::Ref(..) = cx.tables.node_type(e.hir_id).sty;
2290 "casting &T to &mut T may cause undefined behaviour, consider instead using an UnsafeCell",