4 use rustc::hir::intravisit::{walk_ty, FnKind, NestedVisitorMap, Visitor};
6 use rustc::ty::{self, Ty, TyCtxt};
7 use rustc::ty::subst::Substs;
8 use std::cmp::Ordering;
9 use syntax::ast::{FloatTy, IntTy, UintTy};
10 use syntax::attr::IntType;
11 use syntax::codemap::Span;
12 use utils::{comparisons, higher, in_external_macro, in_macro, last_path_segment, match_def_path, match_path,
13 opt_def_id, snippet, snippet_opt, span_help_and_lint, span_lint, span_lint_and_sugg, type_size};
16 /// Handles all the linting of funky types
17 #[allow(missing_copy_implementations)]
20 /// **What it does:** Checks for use of `Box<Vec<_>>` anywhere in the code.
22 /// **Why is this bad?** `Vec` already keeps its contents in a separate area on
23 /// the heap. So if you `Box` it, you just add another level of indirection
24 /// without any benefit whatsoever.
26 /// **Known problems:** None.
31 /// values: Box<Vec<Foo>>,
37 "usage of `Box<Vec<T>>`, vector elements are already on the heap"
40 /// **What it does:** Checks for usage of any `LinkedList`, suggesting to use a
41 /// `Vec` or a `VecDeque` (formerly called `RingBuf`).
43 /// **Why is this bad?** Gankro says:
45 /// > The TL;DR of `LinkedList` is that it's built on a massive amount of
46 /// pointers and indirection.
47 /// > It wastes memory, it has terrible cache locality, and is all-around slow.
49 /// > "only" amortized for push/pop, should be faster in the general case for
50 /// almost every possible
51 /// > workload, and isn't even amortized at all if you can predict the capacity
54 /// > `LinkedList`s are only really good if you're doing a lot of merging or
55 /// splitting of lists.
56 /// > This is because they can just mangle some pointers instead of actually
57 /// copying the data. Even
58 /// > if you're doing a lot of insertion in the middle of the list, `RingBuf`
59 /// can still be better
60 /// > because of how expensive it is to seek to the middle of a `LinkedList`.
62 /// **Known problems:** False positives – the instances where using a
63 /// `LinkedList` makes sense are few and far between, but they can still happen.
67 /// let x = LinkedList::new();
72 "usage of LinkedList, usually a vector is faster, or a more specialized data \
73 structure like a VecDeque"
76 /// **What it does:** Checks for use of `&Box<T>` anywhere in the code.
78 /// **Why is this bad?** Any `&Box<T>` can also be a `&T`, which is more
81 /// **Known problems:** None.
85 /// fn foo(bar: &Box<T>) { ... }
90 "a borrow of a boxed type"
// Registers the three lints handled by `TypePass`: BOX_VEC, LINKEDLIST and BORROWED_BOX.
93 impl LintPass for TypePass {
94 fn get_lints(&self) -> LintArray {
95 lint_array!(BOX_VEC, LINKEDLIST, BORROWED_BOX)
// Late-pass hooks for `TypePass`: function signatures, struct fields, trait items
// and local bindings are each routed to `check_fn_decl`/`check_ty`.
99 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypePass {
100 fn check_fn(&mut self, cx: &LateContext, _: FnKind, decl: &FnDecl, _: &Body, _: Span, id: NodeId) {
101 // skip trait implementations, see #605
102 if let Some(map::NodeItem(item)) = cx.tcx.hir.find(cx.tcx.hir.get_parent(id)) {
103 if let ItemImpl(_, _, _, _, Some(..), _, _) = item.node {
108 check_fn_decl(cx, decl);
111 fn check_struct_field(&mut self, cx: &LateContext, field: &StructField) {
112 check_ty(cx, &field.ty, false);
115 fn check_trait_item(&mut self, cx: &LateContext, item: &TraitItem) {
117 TraitItemKind::Const(ref ty, _) | TraitItemKind::Type(_, Some(ref ty)) => check_ty(cx, ty, false),
118 TraitItemKind::Method(ref sig, _) => check_fn_decl(cx, &sig.decl),
// `let` bindings are checked with `is_local = true`, which restricts linting to BORROWED_BOX.
123 fn check_local(&mut self, cx: &LateContext, local: &Local) {
124 if let Some(ref ty) = local.ty {
125 check_ty(cx, ty, true);
// Checks every input type and the (explicit) return type of a function
// declaration; `is_local = false` so all `TypePass` lints apply.
130 fn check_fn_decl(cx: &LateContext, decl: &FnDecl) {
131 for input in &decl.inputs {
132 check_ty(cx, input, false);
135 if let FunctionRetTy::Return(ref ty) = decl.output {
136 check_ty(cx, ty, false);
140 /// Recursively check for `TypePass` lints in the given type. Stop at the first
143 /// The parameter `is_local` distinguishes the context of the type; types from
144 /// local bindings should only be checked for the `BORROWED_BOX` lint.
145 fn check_ty(cx: &LateContext, ast_ty: &hir::Ty, is_local: bool) {
// Bail out on macro-generated types to avoid lints on code the user did not write.
146 if in_macro(ast_ty.span) {
// Path types (non-local only): detect `Box<Vec<_>>` (BOX_VEC) and `LinkedList` (LINKEDLIST).
150 TyPath(ref qpath) if !is_local => {
151 let hir_id = cx.tcx.hir.node_to_hir_id(ast_ty.id);
152 let def = cx.tables.qpath_def(qpath, hir_id);
153 if let Some(def_id) = opt_def_id(def) {
154 if Some(def_id) == cx.tcx.lang_items.owned_box() {
155 let last = last_path_segment(qpath);
157 !last.parameters.parenthesized,
158 let Some(vec) = last.parameters.types.get(0),
159 let TyPath(ref qpath) = vec.node,
160 let Some(did) = opt_def_id(cx.tables.qpath_def(qpath, cx.tcx.hir.node_to_hir_id(vec.id))),
161 match_def_path(cx.tcx, did, &paths::VEC),
163 span_help_and_lint(cx,
166 "you seem to be trying to use `Box<Vec<T>>`. Consider using just `Vec<T>`",
167 "`Vec<T>` is already on the heap, `Box<Vec<T>>` makes an extra allocation.");
168 return; // don't recurse into the type
170 } else if match_def_path(cx.tcx, def_id, &paths::LINKED_LIST) {
175 "I see you're using a LinkedList! Perhaps you meant some other data structure?",
176 "a VecDeque might work",
178 return; // don't recurse into the type
// No lint fired: recurse into the path's self-type and type parameters.
182 QPath::Resolved(Some(ref ty), ref p) => {
183 check_ty(cx, ty, is_local);
186 .flat_map(|seg| seg.parameters.types.iter())
188 check_ty(cx, ty, is_local);
191 QPath::Resolved(None, ref p) => for ty in p.segments
193 .flat_map(|seg| seg.parameters.types.iter())
195 check_ty(cx, ty, is_local);
197 QPath::TypeRelative(ref ty, ref seg) => {
198 check_ty(cx, ty, is_local);
199 for ty in seg.parameters.types.iter() {
200 check_ty(cx, ty, is_local);
// Reference types: detect `&Box<T>` (BORROWED_BOX) and suggest `&T` instead.
205 TyRptr(ref lt, MutTy { ref ty, ref mutbl }) => {
207 TyPath(ref qpath) => {
208 let hir_id = cx.tcx.hir.node_to_hir_id(ty.id);
209 let def = cx.tables.qpath_def(qpath, hir_id);
211 let Some(def_id) = opt_def_id(def),
212 Some(def_id) == cx.tcx.lang_items.owned_box(),
213 let QPath::Resolved(None, ref path) = *qpath,
214 let [ref bx] = *path.segments,
215 !bx.parameters.parenthesized,
216 let [ref inner] = *bx.parameters.types
218 if is_any_trait(inner) {
219 // Ignore `Box<Any>` types, see #1884 for details.
// Reconstruct lifetime/mutability so the suggestion preserves `&'a mut`.
223 let ltopt = if lt.is_elided() {
226 format!("{} ", lt.name.as_str())
228 let mutopt = if *mutbl == Mutability::MutMutable {
233 span_lint_and_sugg(cx,
236 "you seem to be trying to use `&Box<T>`. Consider using just `&T`",
238 format!("&{}{}{}", ltopt, mutopt, &snippet(cx, inner.span, ".."))
240 return; // don't recurse into the type
242 check_ty(cx, ty, is_local);
244 _ => check_ty(cx, ty, is_local),
// Slices, arrays, raw pointers and tuples: recurse into element types.
248 TySlice(ref ty) | TyArray(ref ty, _) | TyPtr(MutTy { ref ty, .. }) => check_ty(cx, ty, is_local),
249 TyTup(ref tys) => for ty in tys {
250 check_ty(cx, ty, is_local);
256 // Returns true if given type is `Any` trait.
257 fn is_any_trait(t: &hir::Ty) -> bool {
259 let TyTraitObject(ref traits, _) = t.node,
261 // Only Send/Sync can be used as additional traits, so it is enough to
262 // check only the first trait.
263 match_path(&traits[0].trait_ref.path, &paths::ANY_TRAIT)
271 #[allow(missing_copy_implementations)]
274 /// **What it does:** Checks for binding a unit value.
276 /// **Why is this bad?** A unit value cannot usefully be used anywhere. So
277 /// binding one is kind of pointless.
279 /// **Known problems:** None.
288 "creating a let binding to a value of unit type, which usually can't be used afterwards"
// Fires LET_UNIT_VALUE on `let` bindings whose pattern has the unit type `()`,
// skipping macro-generated code and `for`-loop desugarings.
291 fn check_let_unit(cx: &LateContext, decl: &Decl) {
292 if let DeclLocal(ref local) = decl.node {
293 match cx.tables.pat_ty(&local.pat).sty {
// An empty tuple type is exactly the unit type.
294 ty::TyTuple(slice, _) if slice.is_empty() => {
295 if in_external_macro(cx, decl.span) || in_macro(local.pat.span) {
298 if higher::is_from_for_desugar(decl) {
306 "this let-binding has unit value. Consider omitting `let {} =`",
307 snippet(cx, local.pat.span, "..")
// Registers the single lint handled by `LetPass`.
316 impl LintPass for LetPass {
317 fn get_lints(&self) -> LintArray {
318 lint_array!(LET_UNIT_VALUE)
// Delegates every declaration to the free function `check_let_unit`.
322 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for LetPass {
323 fn check_decl(&mut self, cx: &LateContext<'a, 'tcx>, decl: &'tcx Decl) {
324 check_let_unit(cx, decl)
328 /// **What it does:** Checks for comparisons to unit.
330 /// **Why is this bad?** Unit is always equal to itself, and thus is just a
331 /// clumsily written constant. Mostly this happens when someone accidentally
332 /// adds semicolons at the end of the operands.
334 /// **Known problems:** None.
338 /// if { foo(); } == { bar(); } { baz(); }
342 /// { foo(); bar(); baz(); }
347 "comparing unit values"
350 #[allow(missing_copy_implementations)]
// Registers the single lint handled by `UnitCmp`.
353 impl LintPass for UnitCmp {
354 fn get_lints(&self) -> LintArray {
355 lint_array!(UNIT_CMP)
// Fires UNIT_CMP on comparison operators whose left operand has unit type;
// the result of such a comparison is a compile-time constant.
359 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnitCmp {
360 fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
361 if in_macro(expr.span) {
364 if let ExprBinary(ref cmp, ref left, _) = expr.node {
366 if op.is_comparison() {
367 match cx.tables.expr_ty(left).sty {
368 ty::TyTuple(slice, _) if slice.is_empty() => {
// `() == ()`, `() <= ()`, `() >= ()` are always true; the missing arms handle the rest.
369 let result = match op {
370 BiEq | BiLe | BiGe => "true",
378 "{}-comparison of unit values detected. This will always be {}",
393 /// **What it does:** Checks for casts from any numerical to a float type where
394 /// the receiving type cannot store all values from the original type without
395 /// rounding errors. This possible rounding is to be expected, so this lint is
396 /// `Allow` by default.
398 /// Basically, this warns on casting any integer with 32 or more bits to `f32`
399 /// or any 64-bit integer to `f64`.
401 /// **Why is this bad?** It's not bad at all. But in some applications it can be
402 /// helpful to know where precision loss can take place. This lint can help find
403 /// those places in the code.
405 /// **Known problems:** None.
409 /// let x = u64::MAX; x as f64
412 pub CAST_PRECISION_LOSS,
414 "casts that cause loss of precision, e.g. `x as f32` where `x: u64`"
417 /// **What it does:** Checks for casts from a signed to an unsigned numerical
418 /// type. In this case, negative values wrap around to large positive values,
419 /// which can be quite surprising in practice. However, as the cast works as
420 /// defined, this lint is `Allow` by default.
422 /// **Why is this bad?** Possibly surprising results. You can activate this lint
423 /// as a one-time check to see where numerical wrapping can arise.
425 /// **Known problems:** None.
430 /// y as u128 // will return 18446744073709551615
435 "casts from signed types to unsigned types, e.g. `x as u32` where `x: i32`"
438 /// **What it does:** Checks for casts between numerical types that may
439 /// truncate large values. This is expected behavior, so the cast is `Allow` by
442 /// **Why is this bad?** In some problem domains, it is good practice to avoid
443 /// truncation. This lint can be activated to help assess where additional
444 /// checks could be beneficial.
446 /// **Known problems:** None.
450 /// fn as_u8(x: u64) -> u8 { x as u8 }
453 pub CAST_POSSIBLE_TRUNCATION,
455 "casts that may cause truncation of the value, e.g. `x as u8` where `x: u32`, \
456 or `x as i32` where `x: f32`"
459 /// **What it does:** Checks for casts from an unsigned type to a signed type of
460 /// the same size. Performing such a cast is a 'no-op' for the compiler,
461 /// i.e. nothing is changed at the bit level, and the binary representation of
462 /// the value is reinterpreted. This can cause wrapping if the value is too big
463 /// for the target signed type. However, the cast works as defined, so this lint
464 /// is `Allow` by default.
466 /// **Why is this bad?** While such a cast is not bad in itself, the results can
467 /// be surprising when this is not the intended behavior, as demonstrated by the
470 /// **Known problems:** None.
474 /// u32::MAX as i32 // will yield a value of `-1`
477 pub CAST_POSSIBLE_WRAP,
479 "casts that may cause wrapping around the value, e.g. `x as i32` where `x: u32` \
483 /// **What it does:** Checks for casts between numerical types that may
484 /// be replaced by safe conversion functions.
486 /// **Why is this bad?** Rust's `as` keyword will perform many kinds of
487 /// conversions, including silently lossy conversions. Conversion functions such
488 /// as `i32::from` will only perform lossless conversions. Using the conversion
489 /// functions prevents conversions from turning into silent lossy conversions if
490 /// the types of the input expressions ever change, and make it easier for
491 /// people reading the code to know that the conversion is lossless.
493 /// **Known problems:** None.
497 /// fn as_u64(x: u8) -> u64 { x as u64 }
502 "casts using `as` that are known to be lossless, e.g. `x as u64` where `x: u8`"
505 /// **What it does:** Checks for casts to the same type.
507 /// **Why is this bad?** It's just unnecessary.
509 /// **Known problems:** None.
513 /// let _ = 2i32 as i32
516 pub UNNECESSARY_CAST,
518 "cast to the same type, e.g. `x as i32` where `x: i32`"
521 /// Returns the size in bits of an integral type.
522 /// Will return 0 if the type is not an int or uint variant
523 fn int_ty_to_nbits(typ: Ty, tcx: TyCtxt) -> u64 {
525 ty::TyInt(i) => match i {
// `isize`/`usize` widths come from the target's data layout, not a fixed constant.
526 IntTy::Is => tcx.data_layout.pointer_size.bits(),
533 ty::TyUint(i) => match i {
534 UintTy::Us => tcx.data_layout.pointer_size.bits(),
// True exactly for the pointer-sized integer types `isize` and `usize`.
545 fn is_isize_or_usize(typ: Ty) -> bool {
547 ty::TyInt(IntTy::Is) | ty::TyUint(UintTy::Us) => true,
// Emits CAST_PRECISION_LOSS, phrasing the message in terms of the source type's
// bit width versus the target float's mantissa width (52 bits for f64, 23 for f32).
552 fn span_precision_loss_lint(cx: &LateContext, expr: &Expr, cast_from: Ty, cast_to_f64: bool) {
553 let mantissa_nbits = if cast_to_f64 { 52 } else { 23 };
// isize/usize -> f64 depends on the target's pointer width, so the wording hedges.
554 let arch_dependent = is_isize_or_usize(cast_from) && cast_to_f64;
555 let arch_dependent_str = "on targets with 64-bit wide pointers ";
556 let from_nbits_str = if arch_dependent {
558 } else if is_isize_or_usize(cast_from) {
559 "32 or 64".to_owned()
561 int_ty_to_nbits(cast_from, cx.tcx).to_string()
568 "casting {0} to {1} causes a loss of precision {2}({0} is {3} bits wide, but {1}'s mantissa \
569 is only {4} bits wide)",
571 if cast_to_f64 { "f64" } else { "f32" },
// Emits CAST_LOSSLESS with a `Type::from(expr)` suggestion replacing the `as` cast.
583 fn span_lossless_lint(cx: &LateContext, expr: &Expr, op: &Expr, cast_from: Ty, cast_to: Ty) {
584 // The suggestion is to use a function call, so if the original expression
585 // has parens on the outside, they are no longer needed.
586 let opt = snippet_opt(cx, op.span);
587 let sugg = if let Some(ref snip) = opt {
588 if snip.starts_with('(') && snip.ends_with(')') {
589 &snip[1..snip.len()-1]
601 &format!("casting {} to {} may become silently lossy if types change", cast_from, cast_to),
603 format!("{}::from({})", cast_to, sugg),
// Decides whether an integer cast may truncate (CAST_POSSIBLE_TRUNCATION) or wrap
// (CAST_POSSIBLE_WRAP), including the architecture-dependent isize/usize cases,
// and attaches a "on targets with N-bit wide pointers" suffix when it matters.
613 fn check_truncation_and_wrapping(cx: &LateContext, expr: &Expr, cast_from: Ty, cast_to: Ty) {
614 let arch_64_suffix = " on targets with 64-bit wide pointers";
615 let arch_32_suffix = " on targets with 32-bit wide pointers";
616 let cast_unsigned_to_signed = !cast_from.is_signed() && cast_to.is_signed();
617 let from_nbits = int_ty_to_nbits(cast_from, cx.tcx);
618 let to_nbits = int_ty_to_nbits(cast_to, cx.tcx);
// The four (pointer-sized?, pointer-sized?) combinations each need their own logic.
619 let (span_truncation, suffix_truncation, span_wrap, suffix_wrap) =
620 match (is_isize_or_usize(cast_from), is_isize_or_usize(cast_to)) {
621 (true, true) | (false, false) => (
622 to_nbits < from_nbits,
624 to_nbits == from_nbits && cast_unsigned_to_signed,
634 to_nbits <= 32 && cast_unsigned_to_signed,
640 cast_unsigned_to_signed,
641 if from_nbits == 64 {
651 CAST_POSSIBLE_TRUNCATION,
654 "casting {} to {} may truncate the value{}",
657 match suffix_truncation {
658 ArchSuffix::_32 => arch_32_suffix,
659 ArchSuffix::_64 => arch_64_suffix,
660 ArchSuffix::None => "",
671 "casting {} to {} may wrap around the value{}",
675 ArchSuffix::_32 => arch_32_suffix,
676 ArchSuffix::_64 => arch_64_suffix,
677 ArchSuffix::None => "",
// Fires CAST_LOSSLESS when a fixed-width integer widens to a strictly larger
// fixed-width type without a sign change — such casts can use `From` instead.
684 fn check_lossless(cx: &LateContext, expr: &Expr, op: &Expr, cast_from: Ty, cast_to: Ty) {
685 let cast_signed_to_unsigned = cast_from.is_signed() && !cast_to.is_signed();
686 let from_nbits = int_ty_to_nbits(cast_from, cx.tcx);
687 let to_nbits = int_ty_to_nbits(cast_to, cx.tcx);
// isize/usize are excluded: their width is target-dependent, so no `From` impl is guaranteed.
688 if !is_isize_or_usize(cast_from) && !is_isize_or_usize(cast_to) && from_nbits < to_nbits && !cast_signed_to_unsigned
690 span_lossless_lint(cx, expr, op, cast_from, cast_to);
// Registers the cast-related lints (CAST_POSSIBLE_TRUNCATION among others).
694 impl LintPass for CastPass {
695 fn get_lints(&self) -> LintArray {
699 CAST_POSSIBLE_TRUNCATION,
// Main driver for the cast lints: inspects every `expr as T` and dispatches on
// whether the source/target are integral or floating point.
707 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for CastPass {
708 fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
709 if let ExprCast(ref ex, _) = expr.node {
710 let (cast_from, cast_to) = (cx.tables.expr_ty(ex), cx.tables.expr_ty(expr));
// UNNECESSARY_CAST: a suffixed literal cast to its own type. Unsuffixed
// literals are exempt because the cast is what picks their type.
711 if let ExprLit(ref lit) = ex.node {
712 use syntax::ast::{LitIntType, LitKind};
714 LitKind::Int(_, LitIntType::Unsuffixed) | LitKind::FloatUnsuffixed(_) => {},
715 _ => if cast_from.sty == cast_to.sty && !in_external_macro(cx, expr.span) {
720 &format!("casting to the same type is unnecessary (`{}` -> `{}`)", cast_from, cast_to),
725 if cast_from.is_numeric() && cast_to.is_numeric() && !in_external_macro(cx, expr.span) {
726 match (cast_from.is_integral(), cast_to.is_integral()) {
// int -> float: precision loss if the int is wider than the mantissa, lossless otherwise.
728 let from_nbits = int_ty_to_nbits(cast_from, cx.tcx);
729 let to_nbits = if let ty::TyFloat(FloatTy::F32) = cast_to.sty {
734 if is_isize_or_usize(cast_from) || from_nbits >= to_nbits {
735 span_precision_loss_lint(cx, expr, cast_from, to_nbits == 64);
737 if from_nbits < to_nbits {
738 span_lossless_lint(cx, expr, ex, cast_from, cast_to);
// float -> int always truncates; additionally it may lose the sign.
744 CAST_POSSIBLE_TRUNCATION,
746 &format!("casting {} to {} may truncate the value", cast_from, cast_to),
748 if !cast_to.is_signed() {
753 &format!("casting {} to {} may lose the sign of the value", cast_from, cast_to),
// int -> int: sign loss, truncation/wrapping, and lossless-cast checks.
758 if cast_from.is_signed() && !cast_to.is_signed() {
763 &format!("casting {} to {} may lose the sign of the value", cast_from, cast_to),
766 check_truncation_and_wrapping(cx, expr, cast_from, cast_to);
767 check_lossless(cx, expr, ex, cast_from, cast_to);
// float -> float: f64 -> f32 truncates, f32 -> f64 is lossless.
770 if let (&ty::TyFloat(FloatTy::F64), &ty::TyFloat(FloatTy::F32)) = (&cast_from.sty, &cast_to.sty)
774 CAST_POSSIBLE_TRUNCATION,
776 "casting f64 to f32 may truncate the value",
779 if let (&ty::TyFloat(FloatTy::F32), &ty::TyFloat(FloatTy::F64)) = (&cast_from.sty, &cast_to.sty)
781 span_lossless_lint(cx, expr, ex, cast_from, cast_to);
790 /// **What it does:** Checks for types used in structs, parameters and `let`
791 /// declarations above a certain complexity threshold.
793 /// **Why is this bad?** Too complex types make the code less readable. Consider
794 /// using a `type` definition to simplify them.
796 /// **Known problems:** None.
800 /// struct Foo { inner: Rc<Vec<Vec<Box<(u32, u32, u32, u32)>>>> }
805 "usage of very complex types that might be better factored into `type` definitions"
// Pass carrying the user-configurable complexity threshold for TYPE_COMPLEXITY.
808 #[allow(missing_copy_implementations)]
809 pub struct TypeComplexityPass {
813 impl TypeComplexityPass {
814 pub fn new(threshold: u64) -> Self {
816 threshold: threshold,
// Registers the single lint handled by `TypeComplexityPass`.
821 impl LintPass for TypeComplexityPass {
822 fn get_lints(&self) -> LintArray {
823 lint_array!(TYPE_COMPLEXITY)
// Visits every place a written-out type can appear (fn signatures, fields,
// statics/consts, trait/impl items, locals) and scores it for TYPE_COMPLEXITY.
827 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeComplexityPass {
830 cx: &LateContext<'a, 'tcx>,
837 self.check_fndecl(cx, decl);
840 fn check_struct_field(&mut self, cx: &LateContext<'a, 'tcx>, field: &'tcx StructField) {
841 // enum variants are also struct fields now
842 self.check_type(cx, &field.ty);
845 fn check_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx Item) {
847 ItemStatic(ref ty, _, _) | ItemConst(ref ty, _) => self.check_type(cx, ty),
848 // functions, enums, structs, impls and traits are covered
853 fn check_trait_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx TraitItem) {
855 TraitItemKind::Const(ref ty, _) | TraitItemKind::Type(_, Some(ref ty)) => self.check_type(cx, ty),
856 TraitItemKind::Method(MethodSig { ref decl, .. }, TraitMethod::Required(_)) => self.check_fndecl(cx, decl),
857 // methods with default impl are covered by check_fn
862 fn check_impl_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx ImplItem) {
864 ImplItemKind::Const(ref ty, _) | ImplItemKind::Type(ref ty) => self.check_type(cx, ty),
865 // methods are covered by check_fn
870 fn check_local(&mut self, cx: &LateContext<'a, 'tcx>, local: &'tcx Local) {
871 if let Some(ref ty) = local.ty {
872 self.check_type(cx, ty);
// Private helpers: `check_fndecl` scores every parameter and return type,
// `check_type` runs the visitor and lints when the score exceeds the threshold.
877 impl<'a, 'tcx> TypeComplexityPass {
878 fn check_fndecl(&self, cx: &LateContext<'a, 'tcx>, decl: &'tcx FnDecl) {
879 for arg in &decl.inputs {
880 self.check_type(cx, arg);
882 if let Return(ref ty) = decl.output {
883 self.check_type(cx, ty);
887 fn check_type(&self, cx: &LateContext, ty: &hir::Ty) {
888 if in_macro(ty.span) {
// Nesting starts at 1 so top-level components already contribute their base score.
892 let mut visitor = TypeComplexityVisitor { score: 0, nest: 1 };
893 visitor.visit_ty(ty);
897 if score > self.threshold {
902 "very complex type used. Consider factoring parts into `type` definitions",
908 /// Walks a type and assigns a complexity score to it.
909 struct TypeComplexityVisitor {
910 /// total complexity score of the type
912 /// current nesting level
// Scoring scheme: each type constructor adds a weight scaled by the current
// nesting level, and most constructors increase nesting for their children.
916 impl<'tcx> Visitor<'tcx> for TypeComplexityVisitor {
917 fn visit_ty(&mut self, ty: &'tcx hir::Ty) {
918 let (add_score, sub_nest) = match ty.node {
919 // _, &x and *x have only small overhead; don't mess with nesting level
920 TyInfer | TyPtr(..) | TyRptr(..) => (1, 0),
922 // the "normal" components of a type: named types, arrays/tuples
923 TyPath(..) | TySlice(..) | TyTup(..) | TyArray(..) => (10 * self.nest, 1),
925 // function types bring a lot of overhead
926 TyBareFn(..) => (50 * self.nest, 1),
928 TyTraitObject(ref param_bounds, _) => {
929 let has_lifetime_parameters = param_bounds
931 .any(|bound| !bound.bound_lifetimes.is_empty());
932 if has_lifetime_parameters {
933 // complex trait bounds like A<'a, 'b>
936 // simple trait bounds like A + B
// Nesting is raised only while visiting this type's children, then restored.
943 self.score += add_score;
944 self.nest += sub_nest;
946 self.nest -= sub_nest;
948 fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
949 NestedVisitorMap::None
953 /// **What it does:** Checks for expressions where a character literal is cast
954 /// to `u8` and suggests using a byte literal instead.
956 /// **Why is this bad?** In general, casting values to smaller types is
957 /// error-prone and should be avoided where possible. In the particular case of
958 /// converting a character literal to u8, it is easy to avoid by just using a
959 /// byte literal instead. As an added bonus, `b'a'` is even slightly shorter
960 /// than `'a' as u8`.
962 /// **Known problems:** None.
971 "casting a character literal to u8"
// Zero-sized pass type implementing the CHAR_LIT_AS_U8 lint.
974 pub struct CharLitAsU8;
// Registers the single lint handled by `CharLitAsU8`.
976 impl LintPass for CharLitAsU8 {
977 fn get_lints(&self) -> LintArray {
978 lint_array!(CHAR_LIT_AS_U8)
// Fires CHAR_LIT_AS_U8 on `'x' as u8`, suggesting the byte literal `b'x'` instead.
982 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for CharLitAsU8 {
983 fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
984 use syntax::ast::{LitKind, UintTy};
// Match only: a cast, of a literal, that is a char literal, whose cast type is u8.
986 if let ExprCast(ref e, _) = expr.node {
987 if let ExprLit(ref l) = e.node {
988 if let LitKind::Char(_) = l.node {
989 if ty::TyUint(UintTy::U8) == cx.tables.expr_ty(expr).sty && !in_macro(expr.span) {
990 let msg = "casting character literal to u8. `char`s \
991 are 4 bytes wide in rust, so casting to u8 \
993 let help = format!("Consider using a byte literal instead:\nb{}", snippet(cx, e.span, "'x'"));
994 span_help_and_lint(cx, CHAR_LIT_AS_U8, expr.span, msg, &help);
1002 /// **What it does:** Checks for comparisons where one side of the relation is
1003 /// either the minimum or maximum value for its type and warns if it involves a
1004 /// case that is always true or always false. Only integer and boolean types are
1007 /// **Why is this bad?** An expression like `min <= x` may misleadingly imply
1008 /// that it is possible for `x` to be less than the minimum. Expressions like
1009 /// `max < x` are probably mistakes.
1011 /// **Known problems:** For `usize` the size of the current compile target will
1012 /// be assumed (e.g. 64 bits on 64 bit systems). This means code that uses such
1013 /// a comparison to detect target pointer width will trigger this lint. One can
1014 /// use `mem::size_of` and compare its value or conditional compilation
1016 /// like `#[cfg(target_pointer_width = "64")] ..` instead.
1021 /// 100 > std::i32::MAX
1024 pub ABSURD_EXTREME_COMPARISONS,
1026 "a comparison with a maximum or minimum value that is always true or false"
// Zero-sized pass type implementing the ABSURD_EXTREME_COMPARISONS lint.
1029 pub struct AbsurdExtremeComparisons;
// Registers the single lint handled by `AbsurdExtremeComparisons`.
1031 impl LintPass for AbsurdExtremeComparisons {
1032 fn get_lints(&self) -> LintArray {
1033 lint_array!(ABSURD_EXTREME_COMPARISONS)
// Pairs an expression with whether it is the type's minimum or maximum value
// (fields carry the `which` discriminant and the `expr` itself; see usages below).
1042 struct ExtremeExpr<'a> {
// Outcome of an absurd comparison: always true, always false, or an inequality
// whose "not equal" case can never occur (e.g. `x <= min`).
1047 enum AbsurdComparisonResult {
1050 InequalityImpossible,
// Normalizes the comparison to `<` / `<=` form and classifies it when one side
// is the extreme (min/max) value of the operand type.
1055 fn detect_absurd_comparison<'a>(
1060 ) -> Option<(ExtremeExpr<'a>, AbsurdComparisonResult)> {
1061 use types::ExtremeType::*;
1062 use types::AbsurdComparisonResult::*;
1063 use utils::comparisons::*;
1065 // absurd comparison only makes sense on primitive types
1066 // primitive types don't implement comparison operators with each other
1067 if cx.tables.expr_ty(lhs) != cx.tables.expr_ty(rhs) {
1071 let normalized = normalize_comparison(op, lhs, rhs);
1072 let (rel, normalized_lhs, normalized_rhs) = if let Some(val) = normalized {
1078 let lx = detect_extreme_expr(cx, normalized_lhs);
1079 let rx = detect_extreme_expr(cx, normalized_rhs);
// `<` cases: comparing strictly against the extreme can never succeed.
1084 (Some(l @ ExtremeExpr { which: Maximum, .. }), _) => (l, AlwaysFalse), // max < x
1085 (_, Some(r @ ExtremeExpr { which: Minimum, .. })) => (r, AlwaysFalse), // x < min
// `<=` cases: either trivially true or equivalent to `==`.
1091 (Some(l @ ExtremeExpr { which: Minimum, .. }), _) => (l, AlwaysTrue), // min <= x
1092 (Some(l @ ExtremeExpr { which: Maximum, .. }), _) => (l, InequalityImpossible), // max <= x
1093 (_, Some(r @ ExtremeExpr { which: Minimum, .. })) => (r, InequalityImpossible), // x <= min
1094 (_, Some(r @ ExtremeExpr { which: Maximum, .. })) => (r, AlwaysTrue), // x <= max
1098 Rel::Ne | Rel::Eq => return None,
// Const-evaluates `expr` and reports whether it equals the minimum or maximum
// value of its (bool / int / uint) type; returns None for non-constant or
// non-extreme values.
1102 fn detect_extreme_expr<'a>(cx: &LateContext, expr: &'a Expr) -> Option<ExtremeExpr<'a>> {
1103 use rustc::middle::const_val::ConstVal::*;
1104 use rustc_const_math::*;
1105 use rustc_const_eval::*;
1106 use types::ExtremeType::*;
1108 let ty = cx.tables.expr_ty(expr);
1111 ty::TyBool | ty::TyInt(_) | ty::TyUint(_) => (),
// Set up the const-eval context with the enclosing item's substitutions.
1115 let parent_item = cx.tcx.hir.get_parent(expr.id);
1116 let parent_def_id = cx.tcx.hir.local_def_id(parent_item);
1117 let substs = Substs::identity_for_item(cx.tcx, parent_def_id);
1118 let cv = match ConstContext::new(cx.tcx, cx.param_env.and(substs), cx.tables).eval(expr) {
1120 Err(_) => return None,
// Exhaustive table of every type's minimum value (isize/usize have one arm per
// possible target pointer width).
1123 let which = match (&ty.sty, cv) {
1124 (&ty::TyBool, Bool(false)) |
1125 (&ty::TyInt(IntTy::Is), Integral(Isize(Is32(::std::i32::MIN)))) |
1126 (&ty::TyInt(IntTy::Is), Integral(Isize(Is64(::std::i64::MIN)))) |
1127 (&ty::TyInt(IntTy::I8), Integral(I8(::std::i8::MIN))) |
1128 (&ty::TyInt(IntTy::I16), Integral(I16(::std::i16::MIN))) |
1129 (&ty::TyInt(IntTy::I32), Integral(I32(::std::i32::MIN))) |
1130 (&ty::TyInt(IntTy::I64), Integral(I64(::std::i64::MIN))) |
1131 (&ty::TyInt(IntTy::I128), Integral(I128(::std::i128::MIN))) |
1132 (&ty::TyUint(UintTy::Us), Integral(Usize(Us32(::std::u32::MIN)))) |
1133 (&ty::TyUint(UintTy::Us), Integral(Usize(Us64(::std::u64::MIN)))) |
1134 (&ty::TyUint(UintTy::U8), Integral(U8(::std::u8::MIN))) |
1135 (&ty::TyUint(UintTy::U16), Integral(U16(::std::u16::MIN))) |
1136 (&ty::TyUint(UintTy::U32), Integral(U32(::std::u32::MIN))) |
1137 (&ty::TyUint(UintTy::U64), Integral(U64(::std::u64::MIN))) |
1138 (&ty::TyUint(UintTy::U128), Integral(U128(::std::u128::MIN))) => Minimum,
// ...and the matching table of maximum values.
1140 (&ty::TyBool, Bool(true)) |
1141 (&ty::TyInt(IntTy::Is), Integral(Isize(Is32(::std::i32::MAX)))) |
1142 (&ty::TyInt(IntTy::Is), Integral(Isize(Is64(::std::i64::MAX)))) |
1143 (&ty::TyInt(IntTy::I8), Integral(I8(::std::i8::MAX))) |
1144 (&ty::TyInt(IntTy::I16), Integral(I16(::std::i16::MAX))) |
1145 (&ty::TyInt(IntTy::I32), Integral(I32(::std::i32::MAX))) |
1146 (&ty::TyInt(IntTy::I64), Integral(I64(::std::i64::MAX))) |
1147 (&ty::TyInt(IntTy::I128), Integral(I128(::std::i128::MAX))) |
1148 (&ty::TyUint(UintTy::Us), Integral(Usize(Us32(::std::u32::MAX)))) |
1149 (&ty::TyUint(UintTy::Us), Integral(Usize(Us64(::std::u64::MAX)))) |
1150 (&ty::TyUint(UintTy::U8), Integral(U8(::std::u8::MAX))) |
1151 (&ty::TyUint(UintTy::U16), Integral(U16(::std::u16::MAX))) |
1152 (&ty::TyUint(UintTy::U32), Integral(U32(::std::u32::MAX))) |
1153 (&ty::TyUint(UintTy::U64), Integral(U64(::std::u64::MAX))) |
1154 (&ty::TyUint(UintTy::U128), Integral(U128(::std::u128::MAX))) => Maximum,
// Entry point for ABSURD_EXTREME_COMPARISONS: runs the detector on every binary
// expression and builds a tailored help message for the three outcome kinds.
1164 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for AbsurdExtremeComparisons {
1165 fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
1166 use types::ExtremeType::*;
1167 use types::AbsurdComparisonResult::*;
1169 if let ExprBinary(ref cmp, ref lhs, ref rhs) = expr.node {
1170 if let Some((culprit, result)) = detect_absurd_comparison(cx, cmp.node, lhs, rhs) {
1171 if !in_macro(expr.span) {
1172 let msg = "this comparison involving the minimum or maximum element for this \
1173 type contains a case that is always true or always false";
1175 let conclusion = match result {
1176 AlwaysFalse => "this comparison is always false".to_owned(),
1177 AlwaysTrue => "this comparison is always true".to_owned(),
// InequalityImpossible suggests rewriting as an equality check instead.
1178 InequalityImpossible => format!(
1179 "the case where the two sides are not equal never occurs, consider using {} == {} \
1181 snippet(cx, lhs.span, "lhs"),
1182 snippet(cx, rhs.span, "rhs")
1187 "because {} is the {} value for this type, {}",
1188 snippet(cx, culprit.expr.span, "x"),
1189 match culprit.which {
1190 Minimum => "minimum",
1191 Maximum => "maximum",
1196 span_help_and_lint(cx, ABSURD_EXTREME_COMPARISONS, expr.span, msg, &help);
1203 /// **What it does:** Checks for comparisons where the relation is always either
1204 /// true or false, but where one side has been upcast so that the comparison is
1205 /// necessary. Only integer types are checked.
1207 /// **Why is this bad?** An expression like `let x : u8 = ...; (x as u32) > 300`
1208 /// will mistakenly imply that it is possible for `x` to be outside the range of
1211 /// **Known problems:**
1212 /// https://github.com/rust-lang-nursery/rust-clippy/issues/886
1216 /// let x : u8 = ...; (x as u32) > 300
1219 pub INVALID_UPCAST_COMPARISONS,
1221 "a comparison involving an upcast which is always true or false"
// Zero-sized pass type implementing the INVALID_UPCAST_COMPARISONS lint.
1224 pub struct InvalidUpcastComparisons;
// Registers the single lint handled by `InvalidUpcastComparisons`.
1226 impl LintPass for InvalidUpcastComparisons {
1227 fn get_lints(&self) -> LintArray {
1228 lint_array!(INVALID_UPCAST_COMPARISONS)
// `FullInt` (definition lines not visible here) represents any integer value as
// either signed i128 or unsigned u128; `cmp_s_u` orders a signed against an
// unsigned value without overflow.
1232 #[derive(Copy, Clone, Debug, Eq)]
1239 #[allow(cast_sign_loss)]
1240 fn cmp_s_u(s: i128, u: u128) -> Ordering {
// Any u128 above i128::MAX is necessarily greater than any signed value.
1243 } else if u > (i128::max_value() as u128) {
// Equality delegates to `partial_cmp`, which is total for `FullInt` (never None).
1251 impl PartialEq for FullInt {
1252 fn eq(&self, other: &Self) -> bool {
1253 self.partial_cmp(other)
1254 .expect("partial_cmp only returns Some(_)") == Ordering::Equal
// Total ordering over mixed signed/unsigned values; cross-sign comparisons go
// through `cmp_s_u`, reversed when the unsigned value is on the left.
1258 impl PartialOrd for FullInt {
1259 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
1260 Some(match (self, other) {
1261 (&FullInt::S(s), &FullInt::S(o)) => s.cmp(&o),
1262 (&FullInt::U(s), &FullInt::U(o)) => s.cmp(&o),
1263 (&FullInt::S(s), &FullInt::U(o)) => Self::cmp_s_u(s, o),
1264 (&FullInt::U(s), &FullInt::S(o)) => Self::cmp_s_u(o, s).reverse(),
// `Ord` unwraps `partial_cmp`, which always returns Some for `FullInt`.
1268 impl Ord for FullInt {
1269 fn cmp(&self, other: &Self) -> Ordering {
1270 self.partial_cmp(other)
1271 .expect("partial_cmp for FullInt can never return None")
// For a cast expression, returns the (min, max) range of the *pre-cast* type as
// `FullInt`s, or None when the cast cannot change the representable range.
1276 fn numeric_cast_precast_bounds<'a>(cx: &LateContext, expr: &'a Expr) -> Option<(FullInt, FullInt)> {
1277 use syntax::ast::{IntTy, UintTy};
1280 if let ExprCast(ref cast_exp, _) = expr.node {
1281 let pre_cast_ty = cx.tables.expr_ty(cast_exp);
1282 let cast_ty = cx.tables.expr_ty(expr);
1283 // if it's a cast from i32 to u32 wrapping will invalidate all these checks
1284 if type_size(cx, pre_cast_ty) == type_size(cx, cast_ty) {
1287 match pre_cast_ty.sty {
1288 ty::TyInt(int_ty) => Some(match int_ty {
1289 IntTy::I8 => (FullInt::S(i128::from(i8::min_value())), FullInt::S(i128::from(i8::max_value()))),
1291 FullInt::S(i128::from(i16::min_value())),
1292 FullInt::S(i128::from(i16::max_value())),
1295 FullInt::S(i128::from(i32::min_value())),
1296 FullInt::S(i128::from(i32::max_value())),
1299 FullInt::S(i128::from(i64::min_value())),
1300 FullInt::S(i128::from(i64::max_value())),
1302 IntTy::I128 => (FullInt::S(i128::min_value() as i128), FullInt::S(i128::max_value() as i128)),
1303 IntTy::Is => (FullInt::S(isize::min_value() as i128), FullInt::S(isize::max_value() as i128)),
1305 ty::TyUint(uint_ty) => Some(match uint_ty {
1306 UintTy::U8 => (FullInt::U(u128::from(u8::min_value())), FullInt::U(u128::from(u8::max_value()))),
1308 FullInt::U(u128::from(u16::min_value())),
1309 FullInt::U(u128::from(u16::max_value())),
1312 FullInt::U(u128::from(u32::min_value())),
1313 FullInt::U(u128::from(u32::max_value())),
1316 FullInt::U(u128::from(u64::min_value())),
1317 FullInt::U(u128::from(u64::max_value())),
1319 UintTy::U128 => (FullInt::U(u128::min_value() as u128), FullInt::U(u128::max_value() as u128)),
1320 UintTy::Us => (FullInt::U(usize::min_value() as u128), FullInt::U(usize::max_value() as u128)),
// Const-evaluates an expression to a `FullInt`, preserving signedness so the
// mixed-sign comparison logic in `FullInt` works; None for non-constant values.
1329 #[allow(cast_possible_wrap)]
1330 fn node_as_const_fullint(cx: &LateContext, expr: &Expr) -> Option<FullInt> {
1331 use rustc::middle::const_val::ConstVal::*;
1332 use rustc_const_eval::ConstContext;
1334 let parent_item = cx.tcx.hir.get_parent(expr.id);
1335 let parent_def_id = cx.tcx.hir.local_def_id(parent_item);
1336 let substs = Substs::identity_for_item(cx.tcx, parent_def_id);
1337 match ConstContext::new(cx.tcx, cx.param_env.and(substs), cx.tables).eval(expr) {
1338 Ok(val) => if let Integral(const_int) = val {
1339 match const_int.int_type() {
// Signed values round-trip through the raw u128 bits; the `as i128` wrap is intended.
1340 IntType::SignedInt(_) => Some(FullInt::S(const_int.to_u128_unchecked() as i128)),
1341 IntType::UnsignedInt(_) => Some(FullInt::U(const_int.to_u128_unchecked())),
// Emits INVALID_UPCAST_COMPARISONS, naming the pre-cast operand and whether the
// comparison is constantly true or constantly false.
1350 fn err_upcast_comparison(cx: &LateContext, span: &Span, expr: &Expr, always: bool) {
1351 if let ExprCast(ref cast_val, _) = expr.node {
1354 INVALID_UPCAST_COMPARISONS,
1357 "because of the numeric bounds on `{}` prior to casting, this expression is always {}",
1358 snippet(cx, cast_val.span, "the expression"),
1359 if always { "true" } else { "false" },
// Given the pre-cast bounds of the (possibly upcast) lhs and a constant rhs,
// decides whether the comparison is tautological; `invert` flips the relation
// for the case where the cast operand appeared on the right-hand side.
1365 fn upcast_comparison_bounds_err(
1368 rel: comparisons::Rel,
1369 lhs_bounds: Option<(FullInt, FullInt)>,
1374 use utils::comparisons::*;
1376 if let Some((lb, ub)) = lhs_bounds {
1377 if let Some(norm_rhs_val) = node_as_const_fullint(cx, rhs) {
// Eq/Ne: a constant outside [lb, ub] makes `==` always false and `!=` always true.
1378 if rel == Rel::Eq || rel == Rel::Ne {
1379 if norm_rhs_val < lb || norm_rhs_val > ub {
1380 err_upcast_comparison(cx, span, lhs, rel == Rel::Ne);
// First block: conditions under which the relation is always true...
1382 } else if match rel {
1383 Rel::Lt => if invert {
1388 Rel::Le => if invert {
1393 Rel::Eq | Rel::Ne => unreachable!(),
1395 err_upcast_comparison(cx, span, lhs, true)
// ...second block: conditions under which it is always false.
1396 } else if match rel {
1397 Rel::Lt => if invert {
1402 Rel::Le => if invert {
1407 Rel::Eq | Rel::Ne => unreachable!(),
1409 err_upcast_comparison(cx, span, lhs, false)
1415 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for InvalidUpcastComparisons {
1416 fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
1417 if let ExprBinary(ref cmp, ref lhs, ref rhs) = expr.node {
1418 let normalized = comparisons::normalize_comparison(cmp.node, lhs, rhs);
1419 let (rel, normalized_lhs, normalized_rhs) = if let Some(val) = normalized {
1425 let lhs_bounds = numeric_cast_precast_bounds(cx, normalized_lhs);
1426 let rhs_bounds = numeric_cast_precast_bounds(cx, normalized_rhs);
1428 upcast_comparison_bounds_err(cx, &expr.span, rel, lhs_bounds, normalized_lhs, normalized_rhs, false);
1429 upcast_comparison_bounds_err(cx, &expr.span, rel, rhs_bounds, normalized_rhs, normalized_lhs, true);