3 use rustc::hir::intravisit::{FnKind, Visitor, walk_ty, NestedVisitorMap};
6 use std::cmp::Ordering;
7 use syntax::ast::{IntTy, UintTy, FloatTy};
8 use syntax::codemap::Span;
9 use utils::{comparisons, higher, in_external_macro, in_macro, match_def_path, snippet, span_help_and_lint, span_lint,
10 opt_def_id, last_path_segment};
13 /// Handles all the linting of funky types
14 #[allow(missing_copy_implementations)]
17 /// **What it does:** Checks for use of `Box<Vec<_>>` anywhere in the code.
19 /// **Why is this bad?** `Vec` already keeps its contents in a separate area on
20 /// the heap. So if you `Box` it, you just add another level of indirection
21 /// without any benefit whatsoever.
23 /// **Known problems:** None.
28 /// values: Box<Vec<Foo>>,
34 "usage of `Box<Vec<T>>`, vector elements are already on the heap"
37 /// **What it does:** Checks for usage of any `LinkedList`, suggesting to use a
38 /// `Vec` or a `VecDeque` (formerly called `RingBuf`).
40 /// **Why is this bad?** Gankro says:
42 /// > The TL;DR of `LinkedList` is that it's built on a massive amount of pointers and indirection.
43 /// > It wastes memory, it has terrible cache locality, and is all-around slow. `RingBuf`, while
44 /// > "only" amortized for push/pop, should be faster in the general case for almost every possible
45 /// > workload, and isn't even amortized at all if you can predict the capacity you need.
47 /// > `LinkedList`s are only really good if you're doing a lot of merging or splitting of lists.
48 /// > This is because they can just mangle some pointers instead of actually copying the data. Even
49 /// > if you're doing a lot of insertion in the middle of the list, `RingBuf` can still be better
50 /// > because of how expensive it is to seek to the middle of a `LinkedList`.
52 /// **Known problems:** False positives – the instances where using a
53 /// `LinkedList` makes sense are few and far between, but they can still happen.
57 /// let x = LinkedList::new();
62 "usage of LinkedList, usually a vector is faster, or a more specialized data \
63 structure like a VecDeque"
// Lint-registration boilerplate: `TypePass` owns the BOX_VEC and
// LINKEDLIST lints declared above.
66 impl LintPass for TypePass {
67 fn get_lints(&self) -> LintArray {
68 lint_array!(BOX_VEC, LINKEDLIST)
// Entry points for `TypePass`: funnels every function signature, struct
// field, and trait item through `check_fn_decl`/`check_ty` below.
72 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypePass {
73 fn check_fn(&mut self, cx: &LateContext, _: FnKind, decl: &FnDecl, _: &Body, _: Span, id: NodeId) {
74 // skip trait implementations, see #605
// Look up the parent HIR node to detect whether this fn lives in a
// trait `impl`; if so the types are dictated by the trait, not the user.
75 if let Some(map::NodeItem(item)) = cx.tcx.hir.find(cx.tcx.hir.get_parent(id)) {
76 if let ItemImpl(_, _, _, Some(..), _, _) = item.node {
// Not a trait impl: lint the argument and return types.
81 check_fn_decl(cx, decl);
// Struct fields (and, per the note elsewhere in this file, enum-variant
// fields) are plain types — check them directly.
84 fn check_struct_field(&mut self, cx: &LateContext, field: &StructField) {
85 check_ty(cx, &field.ty);
// Trait items: associated consts/types carry a type, methods carry a
// full signature.
88 fn check_trait_item(&mut self, cx: &LateContext, item: &TraitItem) {
90 TraitItemKind::Const(ref ty, _) |
91 TraitItemKind::Type(_, Some(ref ty)) => check_ty(cx, ty),
92 TraitItemKind::Method(ref sig, _) => check_fn_decl(cx, &sig.decl),
// Runs `check_ty` over every input type and, when explicit, the return
// type of a function declaration.
98 fn check_fn_decl(cx: &LateContext, decl: &FnDecl) {
99 for input in &decl.inputs {
// `FunctionRetTy::Return` is an explicitly written return type (not the
// default `()`); only that case needs checking.
103 if let FunctionRetTy::Return(ref ty) = decl.output {
// Core type check for `TypePass`: recursively walks an AST type and emits
// BOX_VEC for `Box<Vec<_>>` and LINKEDLIST for `LinkedList`. Skips types
// that originate in macros. NOTE(review): interior lines of this function
// are not visible in this view; the recursion points below are inferred
// from the visible match arms.
108 fn check_ty(cx: &LateContext, ast_ty: &Ty) {
109 if in_macro(cx, ast_ty.span) {
113 TyPath(ref qpath) => {
114 let def = cx.tables.qpath_def(qpath, ast_ty.id);
115 if let Some(def_id) = opt_def_id(def) {
// `owned_box` is the lang item for `Box`; from there, inspect the first
// generic argument to see whether it is `Vec`.
116 if Some(def_id) == cx.tcx.lang_items.owned_box() {
117 let last = last_path_segment(qpath);
119 let PathParameters::AngleBracketedParameters(ref ag) = last.parameters,
120 let Some(ref vec) = ag.types.get(0),
121 let TyPath(ref qpath) = vec.node,
122 let def::Def::Struct(..) = cx.tables.qpath_def(qpath, vec.id),
123 let Some(did) = opt_def_id(cx.tables.qpath_def(qpath, vec.id)),
124 match_def_path(cx.tcx, did, &paths::VEC),
126 span_help_and_lint(cx,
129 "you seem to be trying to use `Box<Vec<T>>`. Consider using just `Vec<T>`",
130 "`Vec<T>` is already on the heap, `Box<Vec<T>>` makes an extra allocation.");
131 return; // don't recurse into the type
133 } else if match_def_path(cx.tcx, def_id, &paths::LINKED_LIST) {
134 span_help_and_lint(cx,
137 "I see you're using a LinkedList! Perhaps you meant some other data structure?",
138 "a VecDeque might work");
139 return; // don't recurse into the type
// No lint fired: recurse into the generic arguments of the path so that
// e.g. `Option<Box<Vec<_>>>` is still caught.
143 QPath::Resolved(Some(ref ty), ref p) => {
145 for ty in p.segments.iter().flat_map(|seg| seg.parameters.types()) {
149 QPath::Resolved(None, ref p) => {
150 for ty in p.segments.iter().flat_map(|seg| seg.parameters.types()) {
154 QPath::TypeRelative(ref ty, ref seg) => {
156 for ty in seg.parameters.types() {
// Pointers and references: recurse into the pointee type.
165 TyPtr(MutTy { ref ty, .. }) |
166 TyRptr(_, MutTy { ref ty, .. }) => check_ty(cx, ty),
176 #[allow(missing_copy_implementations)]
179 /// **What it does:** Checks for binding a unit value.
181 /// **Why is this bad?** A unit value cannot usefully be used anywhere. So
182 /// binding one is kind of pointless.
184 /// **Known problems:** None.
193 "creating a let binding to a value of unit type, which usually can't be used afterwards"
// Emits LET_UNIT_VALUE for `let x = <unit expression>;` bindings: a
// binding whose pattern type is the empty tuple `()`. Bindings produced
// by macros or by `for`-loop desugaring are exempt.
196 fn check_let_unit(cx: &LateContext, decl: &Decl) {
197 if let DeclLocal(ref local) = decl.node {
// The type of the bound pattern, as resolved by type checking.
198 let bindtype = &cx.tables.pat_ty(&local.pat).sty;
// A zero-length tuple type is the unit type `()`.
200 ty::TyTuple(slice) if slice.is_empty() => {
201 if in_external_macro(cx, decl.span) || in_macro(cx, local.pat.span) {
// `for` loops desugar to a `let` of unit type; don't lint those.
204 if higher::is_from_for_desugar(decl) {
210 &format!("this let-binding has unit value. Consider omitting `let {} =`",
211 snippet(cx, local.pat.span, "..")));
// Lint-registration boilerplate for `LetPass` (LET_UNIT_VALUE).
218 impl LintPass for LetPass {
219 fn get_lints(&self) -> LintArray {
220 lint_array!(LET_UNIT_VALUE)
// Hook every `let` declaration into `check_let_unit` above.
224 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for LetPass {
225 fn check_decl(&mut self, cx: &LateContext<'a, 'tcx>, decl: &'tcx Decl) {
226 check_let_unit(cx, decl)
230 /// **What it does:** Checks for comparisons to unit.
232 /// **Why is this bad?** Unit is always equal to itself, and thus is just a
233 /// clumsily written constant. Mostly this happens when someone accidentally
234 /// adds semicolons at the end of the operands.
236 /// **Known problems:** None.
240 /// if { foo(); } == { bar(); } { baz(); }
244 /// { foo(); bar(); baz(); }
249 "comparing unit values"
252 #[allow(missing_copy_implementations)]
// Lint-registration boilerplate for `UnitCmp` (UNIT_CMP).
255 impl LintPass for UnitCmp {
256 fn get_lints(&self) -> LintArray {
257 lint_array!(UNIT_CMP)
// Flags comparisons between unit values: `() == ()` etc. are constants in
// disguise. Macro-generated expressions are skipped.
261 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnitCmp {
262 fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
263 if in_macro(cx, expr.span) {
// Only binary comparison operators are of interest.
266 if let ExprBinary(ref cmp, ref left, _) = expr.node {
268 if op.is_comparison() {
// Both sides necessarily have the same type, so inspecting the LHS
// suffices; a zero-length tuple type is `()`.
269 let sty = &cx.tables.expr_ty(left).sty;
271 ty::TyTuple(slice) if slice.is_empty() => {
// For unit operands the result of the comparison is a constant:
// ==, <=, >= are always true; the remaining operators always false.
272 let result = match op {
273 BiEq | BiLe | BiGe => "true",
279 &format!("{}-comparison of unit values detected. This will always be {}",
292 /// **What it does:** Checks for casts from any numerical to a float type where
293 /// the receiving type cannot store all values from the original type without
294 /// rounding errors. This possible rounding is to be expected, so this lint is
295 /// `Allow` by default.
297 /// Basically, this warns on casting any integer with 32 or more bits to `f32`
298 /// or any 64-bit integer to `f64`.
300 /// **Why is this bad?** It's not bad at all. But in some applications it can be
301 /// helpful to know where precision loss can take place. This lint can help find
302 /// those places in the code.
304 /// **Known problems:** None.
308 /// let x = u64::MAX; x as f64
311 pub CAST_PRECISION_LOSS,
313 "casts that cause loss of precision, e.g `x as f32` where `x: u64`"
316 /// **What it does:** Checks for casts from a signed to an unsigned numerical
317 /// type. In this case, negative values wrap around to large positive values,
318 /// which can be quite surprising in practice. However, as the cast works as
319 /// defined, this lint is `Allow` by default.
321 /// **Why is this bad?** Possibly surprising results. You can activate this lint
322 /// as a one-time check to see where numerical wrapping can arise.
324 /// **Known problems:** None.
329 /// y as u128 // will return 18446744073709551615
334 "casts from signed types to unsigned types, e.g `x as u32` where `x: i32`"
337 /// **What it does:** Checks for casts between numerical types that may
338 /// truncate large values. This is expected behavior, so the cast is `Allow` by
341 /// **Why is this bad?** In some problem domains, it is good practice to avoid
342 /// truncation. This lint can be activated to help assess where additional
343 /// checks could be beneficial.
345 /// **Known problems:** None.
349 /// fn as_u8(x: u64) -> u8 { x as u8 }
352 pub CAST_POSSIBLE_TRUNCATION,
354 "casts that may cause truncation of the value, e.g `x as u8` where `x: u32`, \
355 or `x as i32` where `x: f32`"
358 /// **What it does:** Checks for casts from an unsigned type to a signed type of
359 /// the same size. Performing such a cast is a 'no-op' for the compiler,
360 /// i.e. nothing is changed at the bit level, and the binary representation of
361 /// the value is reinterpreted. This can cause wrapping if the value is too big
362 /// for the target signed type. However, the cast works as defined, so this lint
363 /// is `Allow` by default.
365 /// **Why is this bad?** While such a cast is not bad in itself, the results can
366 /// be surprising when this is not the intended behavior, as demonstrated by the
369 /// **Known problems:** None.
373 /// u32::MAX as i32 // will yield a value of `-1`
376 pub CAST_POSSIBLE_WRAP,
378 "casts that may cause wrapping around the value, e.g `x as i32` where `x: u32` \
382 /// Returns the size in bits of an integral type.
383 /// Will return 0 if the type is not an int or uint variant
// NOTE(review): the shift `4 << discriminant` maps the Int/Uint variant
// index to a bit width (so the first variant yields 4, then 8, 16, …);
// this relies on the declaration order of `IntTy`/`UintTy` — confirm
// against the syntax crate if those enums change.
384 fn int_ty_to_nbits(typ: &ty::TyS) -> usize {
385 let n = match typ.sty {
386 ty::TyInt(i) => 4 << (i as usize),
387 ty::TyUint(u) => 4 << (u as usize),
390 // n == 4 is the usize/isize case
// Pointer-sized integers: resolve to the host pointer width in bits.
392 ::std::mem::size_of::<usize>() * 8
// True iff the type is one of the pointer-sized integers `isize`/`usize`
// (whose width is target-dependent and needs special handling in the
// cast lints below).
398 fn is_isize_or_usize(typ: &ty::TyS) -> bool {
400 ty::TyInt(IntTy::Is) |
401 ty::TyUint(UintTy::Us) => true,
// Emits CAST_PRECISION_LOSS for an integer-to-float cast, spelling out
// how many mantissa bits the destination float has (52 for f64, 23 for
// f32) versus the width of the source integer.
406 fn span_precision_loss_lint(cx: &LateContext, expr: &Expr, cast_from: &ty::TyS, cast_to_f64: bool) {
407 let mantissa_nbits = if cast_to_f64 { 52 } else { 23 };
// isize/usize -> f64 only loses precision on 64-bit targets, so the
// message must be qualified as architecture-dependent.
408 let arch_dependent = is_isize_or_usize(cast_from) && cast_to_f64;
409 let arch_dependent_str = "on targets with 64-bit wide pointers ";
410 let from_nbits_str = if arch_dependent {
412 } else if is_isize_or_usize(cast_from) {
413 "32 or 64".to_owned()
415 int_ty_to_nbits(cast_from).to_string()
420 &format!("casting {0} to {1} causes a loss of precision {2}({0} is {3} bits wide, but {1}'s mantissa \
421 is only {4} bits wide)",
423 if cast_to_f64 { "f64" } else { "f32" },
// Decides whether an integer-to-integer cast can truncate and/or wrap,
// and emits CAST_POSSIBLE_TRUNCATION / CAST_POSSIBLE_WRAP accordingly.
// Casts involving isize/usize get an "on targets with N-bit wide
// pointers" suffix because the answer depends on the target.
439 fn check_truncation_and_wrapping(cx: &LateContext, expr: &Expr, cast_from: &ty::TyS, cast_to: &ty::TyS) {
440 let arch_64_suffix = " on targets with 64-bit wide pointers";
441 let arch_32_suffix = " on targets with 32-bit wide pointers";
// Wrapping is only possible when reinterpreting an unsigned value as signed.
442 let cast_unsigned_to_signed = !cast_from.is_signed() && cast_to.is_signed();
443 let (from_nbits, to_nbits) = (int_ty_to_nbits(cast_from), int_ty_to_nbits(cast_to));
// The four-tuple is: (can truncate?, truncation arch suffix,
//                     can wrap?,     wrap arch suffix),
// keyed on whether either side is pointer-sized.
444 let (span_truncation, suffix_truncation, span_wrap, suffix_wrap) = match (is_isize_or_usize(cast_from),
445 is_isize_or_usize(cast_to)) {
// Both or neither pointer-sized: widths are directly comparable.
446 (true, true) | (false, false) => {
447 (to_nbits < from_nbits,
449 to_nbits == from_nbits && cast_unsigned_to_signed,
459 to_nbits <= 32 && cast_unsigned_to_signed,
465 cast_unsigned_to_signed,
466 if from_nbits == 64 {
475 CAST_POSSIBLE_TRUNCATION,
477 &format!("casting {} to {} may truncate the value{}",
480 match suffix_truncation {
481 ArchSuffix::_32 => arch_32_suffix,
482 ArchSuffix::_64 => arch_64_suffix,
483 ArchSuffix::None => "",
490 &format!("casting {} to {} may wrap around the value{}",
494 ArchSuffix::_32 => arch_32_suffix,
495 ArchSuffix::_64 => arch_64_suffix,
496 ArchSuffix::None => "",
// Lint-registration boilerplate for `CastPass` (the cast-related lints).
501 impl LintPass for CastPass {
502 fn get_lints(&self) -> LintArray {
503 lint_array!(CAST_PRECISION_LOSS,
505 CAST_POSSIBLE_TRUNCATION,
// Dispatcher for the cast lints: classifies every numeric `as` cast by
// (integral -> integral / integral -> float / float -> integral /
// float -> float) and routes to the appropriate check. External-macro
// expressions are exempt.
510 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for CastPass {
511 fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
512 if let ExprCast(ref ex, _) = expr.node {
513 let (cast_from, cast_to) = (cx.tables.expr_ty(ex), cx.tables.expr_ty(expr));
514 if cast_from.is_numeric() && cast_to.is_numeric() && !in_external_macro(cx, expr.span) {
515 match (cast_from.is_integral(), cast_to.is_integral()) {
// int -> float: precision is lost when the integer has at least as many
// bits as the float's mantissa. `to_nbits` here is the mantissa-relevant
// width of the destination float (f32 vs f64).
517 let from_nbits = int_ty_to_nbits(cast_from);
518 let to_nbits = if let ty::TyFloat(FloatTy::F32) = cast_to.sty {
523 if is_isize_or_usize(cast_from) || from_nbits >= to_nbits {
524 span_precision_loss_lint(cx, expr, cast_from, to_nbits == 64);
// float -> int: always a possible truncation, and a possible sign loss
// when the destination is unsigned.
529 CAST_POSSIBLE_TRUNCATION,
531 &format!("casting {} to {} may truncate the value", cast_from, cast_to));
532 if !cast_to.is_signed() {
536 &format!("casting {} to {} may lose the sign of the value", cast_from, cast_to));
// int -> int: sign loss when going signed -> unsigned, then delegate the
// truncation/wrapping analysis.
540 if cast_from.is_signed() && !cast_to.is_signed() {
544 &format!("casting {} to {} may lose the sign of the value", cast_from, cast_to));
546 check_truncation_and_wrapping(cx, expr, cast_from, cast_to);
// float -> float: only the narrowing direction (f64 -> f32) can truncate.
549 if let (&ty::TyFloat(FloatTy::F64), &ty::TyFloat(FloatTy::F32)) =
550 (&cast_from.sty, &cast_to.sty) {
552 CAST_POSSIBLE_TRUNCATION,
554 "casting f64 to f32 may truncate the value");
563 /// **What it does:** Checks for types used in structs, parameters and `let`
564 /// declarations above a certain complexity threshold.
566 /// **Why is this bad?** Too complex types make the code less readable. Consider
567 /// using a `type` definition to simplify them.
569 /// **Known problems:** None.
573 /// struct Foo { inner: Rc<Vec<Vec<Box<(u32, u32, u32, u32)>>>> }
578 "usage of very complex types that might be better factored into `type` definitions"
581 #[allow(missing_copy_implementations)]
// Pass state for TYPE_COMPLEXITY: carries the configurable score
// threshold above which a type is reported (see `new` below).
582 pub struct TypeComplexityPass {
// Constructor taking the complexity threshold (typically supplied from
// the user's clippy configuration).
586 impl TypeComplexityPass {
587 pub fn new(threshold: u64) -> Self {
588 TypeComplexityPass { threshold: threshold }
// Lint-registration boilerplate for `TypeComplexityPass` (TYPE_COMPLEXITY).
592 impl LintPass for TypeComplexityPass {
593 fn get_lints(&self) -> LintArray {
594 lint_array!(TYPE_COMPLEXITY)
// Entry points for TYPE_COMPLEXITY: every place a type can be written
// (fn signatures, fields, statics/consts, trait/impl items, `let`
// annotations) is funnelled into `check_type`/`check_fndecl` below.
598 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeComplexityPass {
601 cx: &LateContext<'a, 'tcx>,
608 self.check_fndecl(cx, decl);
611 fn check_struct_field(&mut self, cx: &LateContext<'a, 'tcx>, field: &'tcx StructField) {
612 // enum variants are also struct fields now
613 self.check_type(cx, &field.ty);
616 fn check_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx Item) {
618 ItemStatic(ref ty, _, _) |
619 ItemConst(ref ty, _) => self.check_type(cx, ty),
620 // functions, enums, structs, impls and traits are covered
625 fn check_trait_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx TraitItem) {
627 TraitItemKind::Const(ref ty, _) |
628 TraitItemKind::Type(_, Some(ref ty)) => self.check_type(cx, ty),
// Only *required* methods are checked here; provided methods have a body
// and therefore pass through check_fn instead.
629 TraitItemKind::Method(MethodSig { ref decl, .. }, TraitMethod::Required(_)) => self.check_fndecl(cx, decl),
630 // methods with default impl are covered by check_fn
635 fn check_impl_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx ImplItem) {
637 ImplItemKind::Const(ref ty, _) |
638 ImplItemKind::Type(ref ty) => self.check_type(cx, ty),
639 // methods are covered by check_fn
// Only lint `let` bindings with an explicit type annotation.
644 fn check_local(&mut self, cx: &LateContext<'a, 'tcx>, local: &'tcx Local) {
645 if let Some(ref ty) = local.ty {
646 self.check_type(cx, ty);
// Private helpers: `check_fndecl` checks every input and the return type;
// `check_type` scores a single type with `TypeComplexityVisitor` and
// reports it when the score exceeds the configured threshold.
651 impl<'a, 'tcx> TypeComplexityPass {
652 fn check_fndecl(&self, cx: &LateContext<'a, 'tcx>, decl: &'tcx FnDecl) {
653 for arg in &decl.inputs {
654 self.check_type(cx, arg);
656 if let Return(ref ty) = decl.output {
657 self.check_type(cx, ty);
661 fn check_type(&self, cx: &LateContext<'a, 'tcx>, ty: &'tcx Ty) {
// Macro-expanded types are not the user's fault; skip them.
662 if in_macro(cx, ty.span) {
666 let mut visitor = TypeComplexityVisitor {
671 visitor.visit_ty(ty);
675 if score > self.threshold {
679 "very complex type used. Consider factoring parts into `type` definitions");
684 /// Walks a type and assigns a complexity score to it.
// Scoring weights deeper nesting more heavily (see the Visitor impl
// below, where per-node scores are multiplied by the nesting level).
685 struct TypeComplexityVisitor<'a, 'tcx: 'a> {
686 /// total complexity score of the type
688 /// current nesting level
690 cx: &'a LateContext<'a, 'tcx>,
// Scoring visitor: each type node contributes (score, nesting delta).
// Scores scale with the current nesting level so that deeply nested
// types are penalized more than flat ones.
693 impl<'a, 'tcx: 'a> Visitor<'tcx> for TypeComplexityVisitor<'a, 'tcx> {
694 fn visit_ty(&mut self, ty: &'tcx Ty) {
695 let (add_score, sub_nest) = match ty.node {
696 // _, &x and *x have only small overhead; don't mess with nesting level
697 TyInfer | TyPtr(..) | TyRptr(..) => (1, 0),
699 // the "normal" components of a type: named types, arrays/tuples
700 TyPath(..) | TySlice(..) | TyTup(..) | TyArray(..) => (10 * self.nest, 1),
702 // function types bring a lot of overhead
703 TyBareFn(..) => (50 * self.nest, 1),
// Trait objects: score depends on whether any bound carries explicit
// lifetimes (those read as more complex).
705 TyTraitObject(ref bounds) => {
706 let has_lifetimes = bounds.iter()
707 .any(|bound| match *bound {
708 TraitTyParamBound(ref poly_trait, ..) => !poly_trait.bound_lifetimes.is_empty(),
709 RegionTyParamBound(..) => true,
712 // complex trait bounds like A<'a, 'b>
715 // simple trait bounds like A + B
// Accumulate, descend one level deeper, then restore the nesting level
// on the way back out.
722 self.score += add_score;
723 self.nest += sub_nest;
725 self.nest -= sub_nest;
// Walk nested bodies too, via the crate's HIR map.
727 fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
728 NestedVisitorMap::All(&self.cx.tcx.hir)
732 /// **What it does:** Checks for expressions where a character literal is cast
733 /// to `u8` and suggests using a byte literal instead.
735 /// **Why is this bad?** In general, casting values to smaller types is
736 /// error-prone and should be avoided where possible. In the particular case of
737 /// converting a character literal to u8, it is easy to avoid by just using a
738 /// byte literal instead. As an added bonus, `b'a'` is even slightly shorter
739 /// than `'a' as u8`.
741 /// **Known problems:** None.
750 "casting a character literal to u8"
// Stateless pass for CHAR_LIT_AS_U8 plus its registration boilerplate.
753 pub struct CharLitAsU8;
755 impl LintPass for CharLitAsU8 {
756 fn get_lints(&self) -> LintArray {
757 lint_array!(CHAR_LIT_AS_U8)
// Flags `'x' as u8` and suggests the byte literal `b'x'` instead.
// Matches: a cast expression whose operand is a char literal and whose
// resulting type is u8, outside of macros.
761 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for CharLitAsU8 {
762 fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
763 use syntax::ast::{LitKind, UintTy};
765 if let ExprCast(ref e, _) = expr.node {
766 if let ExprLit(ref l) = e.node {
767 if let LitKind::Char(_) = l.node {
768 if ty::TyUint(UintTy::U8) == cx.tables.expr_ty(expr).sty && !in_macro(cx, expr.span) {
769 let msg = "casting character literal to u8. `char`s \
770 are 4 bytes wide in rust, so casting to u8 \
772 let help = format!("Consider using a byte literal instead:\nb{}", snippet(cx, e.span, "'x'"));
773 span_help_and_lint(cx, CHAR_LIT_AS_U8, expr.span, msg, &help);
781 /// **What it does:** Checks for comparisons where one side of the relation is
782 /// either the minimum or maximum value for its type and warns if it involves a
783 /// case that is always true or always false. Only integer and boolean types are
786 /// **Why is this bad?** An expression like `min <= x` may misleadingly imply
787 /// that it is possible for `x` to be less than the minimum. Expressions like
788 /// `max < x` are probably mistakes.
790 /// **Known problems:** None.
795 /// 100 > std::i32::MAX
798 pub ABSURD_EXTREME_COMPARISONS,
800 "a comparison with a maximum or minimum value that is always true or false"
// Stateless pass for ABSURD_EXTREME_COMPARISONS plus its registration
// boilerplate.
803 pub struct AbsurdExtremeComparisons;
805 impl LintPass for AbsurdExtremeComparisons {
806 fn get_lints(&self) -> LintArray {
807 lint_array!(ABSURD_EXTREME_COMPARISONS)
// An expression identified as the minimum or maximum value of its type,
// paired (in `which`) with which extreme it is.
816 struct ExtremeExpr<'a> {
// Classification of an absurd comparison: always true, always false, or
// an inequality whose "not equal" case can never occur.
821 enum AbsurdComparisonResult {
824 InequalityImpossible,
// Determines whether `lhs op rhs` is an "absurd" comparison: one side is
// the minimum or maximum value of its type, making the relation constant.
// Returns the extreme operand and the classification, or None.
829 fn detect_absurd_comparison<'a>(
834 ) -> Option<(ExtremeExpr<'a>, AbsurdComparisonResult)> {
835 use types::ExtremeType::*;
836 use types::AbsurdComparisonResult::*;
837 use utils::comparisons::*;
839 // absurd comparison only makes sense on primitive types
840 // primitive types don't implement comparison operators with each other
841 if cx.tables.expr_ty(lhs) != cx.tables.expr_ty(rhs) {
// Normalize the operator so only `<` and `<=` (plus Eq/Ne) remain; this
// halves the number of match arms below.
845 let normalized = normalize_comparison(op, lhs, rhs);
846 let (rel, normalized_lhs, normalized_rhs) = if let Some(val) = normalized {
852 let lx = detect_extreme_expr(cx, normalized_lhs);
853 let rx = detect_extreme_expr(cx, normalized_rhs);
// For `<`: the relation can never hold when the left side is the maximum
// or the right side is the minimum.
858 (Some(l @ ExtremeExpr { which: Maximum, .. }), _) => (l, AlwaysFalse), // max < x
859 (_, Some(r @ ExtremeExpr { which: Minimum, .. })) => (r, AlwaysFalse), // x < min
// For `<=`: min <= x / x <= max always hold; max <= x / x <= min can only
// hold with equality, so the strict-inequality case is impossible.
865 (Some(l @ ExtremeExpr { which: Minimum, .. }), _) => (l, AlwaysTrue), // min <= x
866 (Some(l @ ExtremeExpr { which: Maximum, .. }), _) => (l, InequalityImpossible), //max <= x
867 (_, Some(r @ ExtremeExpr { which: Minimum, .. })) => (r, InequalityImpossible), // x <= min
868 (_, Some(r @ ExtremeExpr { which: Maximum, .. })) => (r, AlwaysTrue), // x <= max
// Plain equality/inequality is never absurd in this sense.
872 Rel::Ne | Rel::Eq => return None,
// Constant-evaluates `expr` and reports whether it is the minimum or
// maximum value of its (bool / integer) type. Non-constant expressions
// and non-extreme constants yield None via the fall-through arms.
876 fn detect_extreme_expr<'a>(cx: &LateContext, expr: &'a Expr) -> Option<ExtremeExpr<'a>> {
877 use rustc::middle::const_val::ConstVal::*;
878 use rustc_const_math::*;
879 use rustc_const_eval::EvalHint::ExprTypeChecked;
880 use rustc_const_eval::*;
881 use types::ExtremeType::*;
883 let ty = &cx.tables.expr_ty(expr).sty;
// Only bool and the integer types have meaningful min/max extremes here.
886 ty::TyBool | ty::TyInt(_) | ty::TyUint(_) => (),
// Constant-fold the expression; anything non-constant is not extreme.
890 let cv = match ConstContext::with_tables(cx.tcx, cx.tables).eval(expr, ExprTypeChecked) {
892 Err(_) => return None,
// Exhaustive (type, constant) table: first the minima...
895 let which = match (ty, cv) {
896 (&ty::TyBool, Bool(false)) |
897 (&ty::TyInt(IntTy::Is), Integral(Isize(Is32(::std::i32::MIN)))) |
898 (&ty::TyInt(IntTy::Is), Integral(Isize(Is64(::std::i64::MIN)))) |
899 (&ty::TyInt(IntTy::I8), Integral(I8(::std::i8::MIN))) |
900 (&ty::TyInt(IntTy::I16), Integral(I16(::std::i16::MIN))) |
901 (&ty::TyInt(IntTy::I32), Integral(I32(::std::i32::MIN))) |
902 (&ty::TyInt(IntTy::I64), Integral(I64(::std::i64::MIN))) |
903 (&ty::TyInt(IntTy::I128), Integral(I128(::std::i128::MIN))) |
904 (&ty::TyUint(UintTy::Us), Integral(Usize(Us32(::std::u32::MIN)))) |
905 (&ty::TyUint(UintTy::Us), Integral(Usize(Us64(::std::u64::MIN)))) |
906 (&ty::TyUint(UintTy::U8), Integral(U8(::std::u8::MIN))) |
907 (&ty::TyUint(UintTy::U16), Integral(U16(::std::u16::MIN))) |
908 (&ty::TyUint(UintTy::U32), Integral(U32(::std::u32::MIN))) |
909 (&ty::TyUint(UintTy::U64), Integral(U64(::std::u64::MIN))) |
910 (&ty::TyUint(UintTy::U128), Integral(U128(::std::u128::MIN))) => Minimum,
// ...then the maxima (isize/usize have both 32- and 64-bit arms because
// their constant representation is target-width dependent).
912 (&ty::TyBool, Bool(true)) |
913 (&ty::TyInt(IntTy::Is), Integral(Isize(Is32(::std::i32::MAX)))) |
914 (&ty::TyInt(IntTy::Is), Integral(Isize(Is64(::std::i64::MAX)))) |
915 (&ty::TyInt(IntTy::I8), Integral(I8(::std::i8::MAX))) |
916 (&ty::TyInt(IntTy::I16), Integral(I16(::std::i16::MAX))) |
917 (&ty::TyInt(IntTy::I32), Integral(I32(::std::i32::MAX))) |
918 (&ty::TyInt(IntTy::I64), Integral(I64(::std::i64::MAX))) |
919 (&ty::TyInt(IntTy::I128), Integral(I128(::std::i128::MAX))) |
920 (&ty::TyUint(UintTy::Us), Integral(Usize(Us32(::std::u32::MAX)))) |
921 (&ty::TyUint(UintTy::Us), Integral(Usize(Us64(::std::u64::MAX)))) |
922 (&ty::TyUint(UintTy::U8), Integral(U8(::std::u8::MAX))) |
923 (&ty::TyUint(UintTy::U16), Integral(U16(::std::u16::MAX))) |
924 (&ty::TyUint(UintTy::U32), Integral(U32(::std::u32::MAX))) |
925 (&ty::TyUint(UintTy::U64), Integral(U64(::std::u64::MAX))) |
926 (&ty::TyUint(UintTy::U128), Integral(U128(::std::u128::MAX))) => Maximum,
// Reports ABSURD_EXTREME_COMPARISONS: runs `detect_absurd_comparison` on
// every binary expression and builds a message explaining which side is
// the extreme value and what the constant outcome is.
936 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for AbsurdExtremeComparisons {
937 fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
938 use types::ExtremeType::*;
939 use types::AbsurdComparisonResult::*;
941 if let ExprBinary(ref cmp, ref lhs, ref rhs) = expr.node {
942 if let Some((culprit, result)) = detect_absurd_comparison(cx, cmp.node, lhs, rhs) {
// Macro-expanded comparisons are not actionable by the user.
943 if !in_macro(cx, expr.span) {
944 let msg = "this comparison involving the minimum or maximum element for this \
945 type contains a case that is always true or always false";
// Tailor the explanation to the classification from detect_absurd_comparison.
947 let conclusion = match result {
948 AlwaysFalse => "this comparison is always false".to_owned(),
949 AlwaysTrue => "this comparison is always true".to_owned(),
950 InequalityImpossible => {
951 format!("the case where the two sides are not equal never occurs, consider using {} == {} \
953 snippet(cx, lhs.span, "lhs"),
954 snippet(cx, rhs.span, "rhs"))
958 let help = format!("because {} is the {} value for this type, {}",
959 snippet(cx, culprit.expr.span, "x"),
960 match culprit.which {
961 Minimum => "minimum",
962 Maximum => "maximum",
966 span_help_and_lint(cx, ABSURD_EXTREME_COMPARISONS, expr.span, msg, &help);
973 /// **What it does:** Checks for comparisons where the relation is always either
974 /// true or false, but where one side has been upcast so that the comparison is
975 /// necessary. Only integer types are checked.
977 /// **Why is this bad?** An expression like `let x : u8 = ...; (x as u32) > 300`
978 /// will mistakenly imply that it is possible for `x` to be outside the range of
981 /// **Known problems:** https://github.com/Manishearth/rust-clippy/issues/886
985 /// let x : u8 = ...; (x as u32) > 300
988 pub INVALID_UPCAST_COMPARISONS,
990 "a comparison involving an upcast which is always true or false"
// Stateless pass for INVALID_UPCAST_COMPARISONS plus its registration
// boilerplate.
993 pub struct InvalidUpcastComparisons;
995 impl LintPass for InvalidUpcastComparisons {
996 fn get_lints(&self) -> LintArray {
997 lint_array!(INVALID_UPCAST_COMPARISONS)
// `FullInt` (definition not fully visible here) holds any integer value
// as either signed i128 or unsigned u128, so values of mixed signedness
// can be compared exactly.
1001 #[derive(Copy, Clone, Debug, Eq)]
// Compares a signed value `s` against an unsigned value `u` without
// overflow: any u above i128::MAX is necessarily greater than any s.
1008 #[allow(cast_sign_loss)]
1009 fn cmp_s_u(s: i128, u: u128) -> Ordering {
1012 } else if u > (i128::max_value() as u128) {
// Equality is defined in terms of `partial_cmp`, which (see below) never
// returns None — hence the `expect` is infallible by construction.
1020 impl PartialEq for FullInt {
1021 fn eq(&self, other: &Self) -> bool {
1022 self.partial_cmp(other).expect("partial_cmp only returns Some(_)") == Ordering::Equal
// Total comparison across signed/unsigned representations: same-variant
// pairs compare directly; mixed pairs go through `cmp_s_u` (reversed for
// the unsigned-vs-signed orientation). Always returns Some, which the
// PartialEq/Ord impls rely on.
1026 impl PartialOrd for FullInt {
1027 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
1028 Some(match (self, other) {
1029 (&FullInt::S(s), &FullInt::S(o)) => s.cmp(&o),
1030 (&FullInt::U(s), &FullInt::U(o)) => s.cmp(&o),
1031 (&FullInt::S(s), &FullInt::U(o)) => Self::cmp_s_u(s, o),
1032 (&FullInt::U(s), &FullInt::S(o)) => Self::cmp_s_u(o, s).reverse(),
// `Ord` simply unwraps `partial_cmp`, which is total for FullInt (it
// always returns Some — see the PartialOrd impl above).
1036 impl Ord for FullInt {
1037 fn cmp(&self, other: &Self) -> Ordering {
1038 self.partial_cmp(other).expect("partial_cmp for FullInt can never return None")
// If `expr` is a cast, returns the (min, max) value range of the operand's
// *pre-cast* integer type as FullInt bounds; None for non-casts and
// non-integer operands. Used to decide whether a comparison after an
// upcast can ever be outside the original range.
1043 fn numeric_cast_precast_bounds<'a>(cx: &LateContext, expr: &'a Expr) -> Option<(FullInt, FullInt)> {
1044 use rustc::ty::TypeVariants::{TyInt, TyUint};
1045 use syntax::ast::{IntTy, UintTy};
1048 if let ExprCast(ref cast_exp, _) = expr.node {
1049 match cx.tables.expr_ty(cast_exp).sty {
// Signed types: widen each bound to i128 so all ranges share one type.
1052 IntTy::I8 => (FullInt::S(i8::min_value() as i128), FullInt::S(i8::max_value() as i128)),
1053 IntTy::I16 => (FullInt::S(i16::min_value() as i128), FullInt::S(i16::max_value() as i128)),
1054 IntTy::I32 => (FullInt::S(i32::min_value() as i128), FullInt::S(i32::max_value() as i128)),
1055 IntTy::I64 => (FullInt::S(i64::min_value() as i128), FullInt::S(i64::max_value() as i128)),
1056 IntTy::I128 => (FullInt::S(i128::min_value() as i128), FullInt::S(i128::max_value() as i128)),
// isize bounds are those of the *host* pointer width here.
1057 IntTy::Is => (FullInt::S(isize::min_value() as i128), FullInt::S(isize::max_value() as i128)),
// Unsigned types: widen each bound to u128.
1060 TyUint(uint_ty) => {
1061 Some(match uint_ty {
1062 UintTy::U8 => (FullInt::U(u8::min_value() as u128), FullInt::U(u8::max_value() as u128)),
1063 UintTy::U16 => (FullInt::U(u16::min_value() as u128), FullInt::U(u16::max_value() as u128)),
1064 UintTy::U32 => (FullInt::U(u32::min_value() as u128), FullInt::U(u32::max_value() as u128)),
1065 UintTy::U64 => (FullInt::U(u64::min_value() as u128), FullInt::U(u64::max_value() as u128)),
1066 UintTy::U128 => (FullInt::U(u128::min_value() as u128), FullInt::U(u128::max_value() as u128)),
1067 UintTy::Us => (FullInt::U(usize::min_value() as u128), FullInt::U(usize::max_value() as u128)),
// Constant-evaluates `expr` and, if it folds to an integral constant,
// returns it as a FullInt (signed or unsigned 128-bit). Returns None for
// non-constant or non-integral expressions.
1077 fn node_as_const_fullint(cx: &LateContext, expr: &Expr) -> Option<FullInt> {
1078 use rustc::middle::const_val::ConstVal::*;
1079 use rustc_const_eval::EvalHint::ExprTypeChecked;
1080 use rustc_const_eval::ConstContext;
1081 use rustc_const_math::ConstInt;
1083 match ConstContext::with_tables(cx.tcx, cx.tables).eval(expr, ExprTypeChecked) {
1085 if let Integral(const_int) = val {
// `erase_type` collapses the constant to an inferred signed/unsigned
// form; any other variant is impossible after erasure.
1086 Some(match const_int.erase_type() {
1087 ConstInt::InferSigned(x) => FullInt::S(x as i128),
1088 ConstInt::Infer(x) => FullInt::U(x as u128),
1089 _ => unreachable!(),
// Emits INVALID_UPCAST_COMPARISONS for the cast expression `expr`, naming
// the pre-cast operand and whether the comparison is constantly true or
// constantly false.
1099 fn err_upcast_comparison(cx: &LateContext, span: &Span, expr: &Expr, always: bool) {
1100 if let ExprCast(ref cast_val, _) = expr.node {
1102 INVALID_UPCAST_COMPARISONS,
1105 "because of the numeric bounds on `{}` prior to casting, this expression is always {}",
1106 snippet(cx, cast_val.span, "the expression"),
1107 if always { "true" } else { "false" },
// Given the pre-cast value range of one side (`lhs_bounds`) and a constant
// other side, decides whether the comparison is constant: for Eq/Ne a
// constant outside the range settles it; for orderings the constant is
// compared against the range's lower/upper bound. Reports via
// `err_upcast_comparison` when the outcome is fixed.
1112 fn upcast_comparison_bounds_err(
1115 rel: comparisons::Rel,
1116 lhs_bounds: Option<(FullInt, FullInt)>,
1121 use utils::comparisons::*;
1123 if let Some((lb, ub)) = lhs_bounds {
1124 if let Some(norm_rhs_val) = node_as_const_fullint(cx, rhs) {
// Eq/Ne: a constant outside [lb, ub] makes == always false / != always true.
1125 if rel == Rel::Eq || rel == Rel::Ne {
1126 if norm_rhs_val < lb || norm_rhs_val > ub {
1127 err_upcast_comparison(cx, span, lhs, rel == Rel::Ne);
// Ordering relations: first branch covers the always-true cases...
1129 } else if match rel {
1144 Rel::Eq | Rel::Ne => unreachable!(),
1146 err_upcast_comparison(cx, span, lhs, true)
// ...second branch the always-false cases.
1147 } else if match rel {
1162 Rel::Eq | Rel::Ne => unreachable!(),
1164 err_upcast_comparison(cx, span, lhs, false)
// Driver for INVALID_UPCAST_COMPARISONS: normalizes the comparison, then
// checks both orientations — upcast-on-the-left and upcast-on-the-right
// (the final bool flags the swapped orientation).
1170 impl<'a, 'tcx> LateLintPass<'a, 'tcx> for InvalidUpcastComparisons {
1171 fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, expr: &'tcx Expr) {
1172 if let ExprBinary(ref cmp, ref lhs, ref rhs) = expr.node {
1174 let normalized = comparisons::normalize_comparison(cmp.node, lhs, rhs);
1175 let (rel, normalized_lhs, normalized_rhs) = if let Some(val) = normalized {
// Pre-cast ranges for each side (None when the side isn't a cast).
1181 let lhs_bounds = numeric_cast_precast_bounds(cx, normalized_lhs);
1182 let rhs_bounds = numeric_cast_precast_bounds(cx, normalized_rhs);
1184 upcast_comparison_bounds_err(cx, &expr.span, rel, lhs_bounds, normalized_lhs, normalized_rhs, false);
1185 upcast_comparison_bounds_err(cx, &expr.span, rel, rhs_bounds, normalized_rhs, normalized_lhs, true);