2 use rustc_front::hir::*;
4 use rustc_front::util::{is_comparison_binop, binop_to_string};
5 use syntax::codemap::Span;
6 use rustc_front::intravisit::{FnKind, Visitor, walk_ty};
8 use syntax::ast::IntTy::*;
9 use syntax::ast::UintTy::*;
10 use syntax::ast::FloatTy::*;
12 use utils::{match_type, snippet, span_lint, span_help_and_lint};
13 use utils::{is_from_for_desugar, in_macro, in_external_macro};
14 use utils::{LL_PATH, VEC_PATH};
16 /// Handles all the linting of funky types
17 #[allow(missing_copy_implementations)]
// NOTE(review): the item this `#[allow]` decorates (presumably the
// `TypePass` unit struct used in the impls below) is elided from this
// chunk — confirm against the full file.
20 /// **What it does:** This lint checks for use of `Box<Vec<_>>` anywhere in the code.
22 /// **Why is this bad?** `Vec` already keeps its contents in a separate area on the heap. So if you `Box` it, you just add another level of indirection without any benefit whatsoever.
24 /// **Known problems:** None
26 /// **Example:** `struct X { values: Box<Vec<Foo>> }`
// Declares the BOX_VEC lint (warn-by-default) with its short description.
27 declare_lint!(pub BOX_VEC, Warn,
28 "usage of `Box<Vec<T>>`, vector elements are already on the heap");
29 /// **What it does:** This lint checks for usage of any `LinkedList`, suggesting to use a `Vec` or a `VecDeque` (formerly called `RingBuf`).
31 /// **Why is this bad?** Gankro says:
33 /// >The TL;DR of `LinkedList` is that it's built on a massive amount of pointers and indirection. It wastes memory, it has terrible cache locality, and is all-around slow. `RingBuf`, while "only" amortized for push/pop, should be faster in the general case for almost every possible workload, and isn't even amortized at all if you can predict the capacity you need.
35 /// > `LinkedList`s are only really good if you're doing a lot of merging or splitting of lists. This is because they can just mangle some pointers instead of actually copying the data. Even if you're doing a lot of insertion in the middle of the list, `RingBuf` can still be better because of how expensive it is to seek to the middle of a `LinkedList`.
37 /// **Known problems:** False positives – the instances where using a `LinkedList` makes sense are few and far between, but they can still happen.
39 /// **Example:** `let x = LinkedList::new();`
// Declares the LINKEDLIST lint (warn-by-default). The description string
// below is split across two lines with a trailing `\` continuation —
// do not insert anything between them.
40 declare_lint!(pub LINKEDLIST, Warn,
41 "usage of LinkedList, usually a vector is faster, or a more specialized data \
42 structure like a VecDeque");
// Registers the two type lints (BOX_VEC, LINKEDLIST) emitted by this pass.
44 impl LintPass for TypePass {
45 fn get_lints(&self) -> LintArray {
46 lint_array!(BOX_VEC, LINKEDLIST)
// NOTE(review): the closing braces of `get_lints` and of this `impl`
// block are elided from this chunk — confirm against the full file.
// Late pass: inspects every syntactic type. Looks up the resolved `ty`
// for the AST type node in the tcx cache, then flags `Box<Vec<_>>`
// (BOX_VEC) and any use of `LinkedList` (LINKEDLIST).
50 impl LateLintPass for TypePass {
51 fn check_ty(&mut self, cx: &LateContext, ast_ty: &Ty) {
52 if let Some(ty) = cx.tcx.ast_ty_to_ty_cache.borrow().get(&ast_ty.id) {
53 if let ty::TyBox(ref inner) = ty.sty {
// `inner` is the boxed type; match_type checks it against the Vec path.
54 if match_type(cx, inner, &VEC_PATH) {
// NOTE(review): the `span_help_and_lint(` call head that these
// arguments belong to is elided from this chunk — confirm.
56 cx, BOX_VEC, ast_ty.span,
57 "you seem to be trying to use `Box<Vec<T>>`. Consider using just `Vec<T>`",
58 "`Vec<T>` is already on the heap, `Box<Vec<T>>` makes an extra allocation.");
// Not a Box: check the type itself against the LinkedList path.
61 else if match_type(cx, ty, &LL_PATH) {
// NOTE(review): call head elided here as well (presumably
// `span_help_and_lint(`); closing braces of the fn/impl are
// also not visible in this chunk.
63 cx, LINKEDLIST, ast_ty.span,
64 "I see you're using a LinkedList! Perhaps you meant some other data structure?",
65 "a VecDeque might work");
71 #[allow(missing_copy_implementations)]
// NOTE(review): the item this `#[allow]` decorates (presumably the
// `LetPass` unit struct used in the impls below) is elided — confirm.
74 /// **What it does:** This lint checks for binding a unit value. It is `Warn` by default.
76 /// **Why is this bad?** A unit value cannot usefully be used anywhere. So binding one is kind of pointless.
78 /// **Known problems:** None
80 /// **Example:** `let x = { 1; };`
// Declares the LET_UNIT_VALUE lint (warn-by-default).
81 declare_lint!(pub LET_UNIT_VALUE, Warn,
82 "creating a let binding to a value of unit type, which usually can't be used afterwards");
// Flags `let`-bindings whose pattern has the unit type `()`, skipping
// macro-generated code and the `let` that `for`-loop desugaring produces.
84 fn check_let_unit(cx: &LateContext, decl: &Decl) {
85 if let DeclLocal(ref local) = decl.node {
// Type of the bound pattern, as resolved by the type context.
86 let bindtype = &cx.tcx.pat_ty(&local.pat).sty;
// An empty tuple type is the unit type.
87 if *bindtype == ty::TyTuple(vec![]) {
// Don't lint inside external or local macros — not the user's code.
88 if in_external_macro(cx, decl.span) ||
89 in_macro(cx, local.pat.span) { return; }
// `for` loops desugar to a unit-valued `let`; that's not user error.
90 if is_from_for_desugar(decl) { return; }
91 span_lint(cx, LET_UNIT_VALUE, decl.span, &format!(
92 "this let-binding has unit value. Consider omitting `let {} =`",
93 snippet(cx, local.pat.span, "..")));
// NOTE(review): the closing braces of this function are elided
// from this chunk — confirm against the full file.
// Registers LET_UNIT_VALUE as the single lint emitted by LetPass.
98 impl LintPass for LetPass {
99 fn get_lints(&self) -> LintArray {
100 lint_array!(LET_UNIT_VALUE)
// NOTE(review): closing braces elided from this chunk — confirm.
// Late pass hook: delegates every declaration to `check_let_unit` above.
104 impl LateLintPass for LetPass {
105 fn check_decl(&mut self, cx: &LateContext, decl: &Decl) {
106 check_let_unit(cx, decl)
// NOTE(review): closing braces elided from this chunk — confirm.
110 /// **What it does:** This lint checks for comparisons to unit. It is `Warn` by default.
112 /// **Why is this bad?** Unit is always equal to itself, and thus is just a clumsily written constant. Mostly this happens when someone accidentally adds semicolons at the end of the operands.
114 /// **Known problems:** None
116 /// **Example:** `if { foo(); } == { bar(); } { baz(); }` is equal to `{ foo(); bar(); baz(); }`
// Declares the UNIT_CMP lint (warn-by-default).
117 declare_lint!(pub UNIT_CMP, Warn,
118 "comparing unit values (which is always `true` or `false`, respectively)");
120 #[allow(missing_copy_implementations)]
// NOTE(review): the decorated item (presumably `pub struct UnitCmp;`)
// is elided from this chunk — confirm against the full file.
// Registers UNIT_CMP as the single lint emitted by this pass.
123 impl LintPass for UnitCmp {
124 fn get_lints(&self) -> LintArray {
125 lint_array!(UNIT_CMP)
// NOTE(review): closing braces elided from this chunk — confirm.
// Late pass: flags comparison operators applied to unit-typed operands.
// Since `()` only has one value, such a comparison is a constant.
129 impl LateLintPass for UnitCmp {
130 fn check_expr(&mut self, cx: &LateContext, expr: &Expr) {
// Skip macro-generated comparisons.
131 if in_macro(cx, expr.span) { return; }
132 if let ExprBinary(ref cmp, ref left, _) = expr.node {
// NOTE(review): the binding of `op` (used below, presumably
// `let op = cmp.node;`) is elided from this chunk — confirm.
134 let sty = &cx.tcx.expr_ty(left).sty;
// Unit type is the empty tuple; only comparison operators matter.
135 if *sty == ty::TyTuple(vec![]) && is_comparison_binop(op) {
136 let result = match op {
// ==, <=, >= on equal values are always true.
137 BiEq | BiLe | BiGe => "true",
// NOTE(review): the "false" arm (presumably `_ => "false",`)
// and the match's closing brace are elided — confirm.
140 span_lint(cx, UNIT_CMP, expr.span, &format!(
141 "{}-comparison of unit values detected. This will always be {}",
142 binop_to_string(op), result));
// NOTE(review): closing braces of this fn/impl are elided.
// ---- Cast lints: four allow-by-default lints about numeric casts. ----
150 /// **What it does:** This lint checks for casts from any numerical to a float type where the receiving type cannot store all values from the original type without rounding errors. This possible rounding is to be expected, so this lint is `Allow` by default.
152 /// Basically, this warns on casting any integer with 32 or more bits to `f32` or any 64-bit integer to `f64`.
154 /// **Why is this bad?** It's not bad at all. But in some applications it can be helpful to know where precision loss can take place. This lint can help find those places in the code.
156 /// **Known problems:** None
158 /// **Example:** `let x = u64::MAX; x as f64`
159 declare_lint!(pub CAST_PRECISION_LOSS, Allow,
160 "casts that cause loss of precision, e.g `x as f32` where `x: u64`");
161 /// **What it does:** This lint checks for casts from a signed to an unsigned numerical type. In this case, negative values wrap around to large positive values, which can be quite surprising in practice. However, as the cast works as defined, this lint is `Allow` by default.
163 /// **Why is this bad?** Possibly surprising results. You can activate this lint as a one-time check to see where numerical wrapping can arise.
165 /// **Known problems:** None
167 /// **Example:** `let y : i8 = -1; y as u64` will return 18446744073709551615
168 declare_lint!(pub CAST_SIGN_LOSS, Allow,
169 "casts from signed types to unsigned types, e.g `x as u32` where `x: i32`");
170 /// **What it does:** This lint checks for on casts between numerical types that may truncate large values. This is expected behavior, so the cast is `Allow` by default.
172 /// **Why is this bad?** In some problem domains, it is good practice to avoid truncation. This lint can be activated to help assess where additional checks could be beneficial.
174 /// **Known problems:** None
176 /// **Example:** `fn as_u8(x: u64) -> u8 { x as u8 }`
177 declare_lint!(pub CAST_POSSIBLE_TRUNCATION, Allow,
178 "casts that may cause truncation of the value, e.g `x as u8` where `x: u32`, or `x as i32` where `x: f32`");
179 /// **What it does:** This lint checks for casts from an unsigned type to a signed type of the same size. Performing such a cast is a 'no-op' for the compiler, i.e. nothing is changed at the bit level, and the binary representation of the value is reinterpreted. This can cause wrapping if the value is too big for the target signed type. However, the cast works as defined, so this lint is `Allow` by default.
181 /// **Why is this bad?** While such a cast is not bad in itself, the results can be surprising when this is not the intended behavior, as demonstrated by the example below.
183 /// **Known problems:** None
185 /// **Example:** `u32::MAX as i32` will yield a value of `-1`.
186 declare_lint!(pub CAST_POSSIBLE_WRAP, Allow,
187 "casts that may cause wrapping around the value, e.g `x as i32` where `x: u32` and `x > i32::MAX`");
189 /// Returns the size in bits of an integral type.
190 /// Will return 0 if the type is not an int or uint variant
191 fn int_ty_to_nbits(typ: &ty::TyS) -> usize {
192 let n = match typ.sty {
// `4 << discriminant` maps the Is/I8/I16/I32/I64 (and Us/U8/...) enum
// ordering to 4/8/16/32/64 — assumes the pointer-sized variant has
// discriminant 0, yielding the sentinel value 4; TODO confirm the
// enum ordering in this rustc version.
193 ty::TyInt(i) => 4 << (i as usize),
194 ty::TyUint(u) => 4 << (u as usize),
// NOTE(review): the fallback arm (presumably `_ => 0,`) and the
// match's closing `};` are elided from this chunk — confirm.
197 // n == 4 is the usize/isize case
198 if n == 4 { ::std::usize::BITS } else { n }
// NOTE(review): the function's closing brace is elided.
// True iff the type is the pointer-sized integer `isize` or `usize`.
201 fn is_isize_or_usize(typ: &ty::TyS) -> bool {
// NOTE(review): the `match typ.sty {` opener, the `_ => false` arm,
// and the closing braces are elided from this chunk — confirm.
203 ty::TyInt(TyIs) | ty::TyUint(TyUs) => true,
// Emits CAST_PRECISION_LOSS for an int-to-float cast, explaining how many
// bits the source has versus the target float's mantissa width.
208 fn span_precision_loss_lint(cx: &LateContext, expr: &Expr, cast_from: &ty::TyS, cast_to_f64: bool) {
// f64 has a 52-bit mantissa, f32 a 23-bit one (excluding the implicit bit).
209 let mantissa_nbits = if cast_to_f64 {52} else {23};
// isize/usize -> f64 only loses precision on 64-bit targets.
210 let arch_dependent = is_isize_or_usize(cast_from) && cast_to_f64;
211 let arch_dependent_str = "on targets with 64-bit wide pointers ";
// Human-readable source width: "64", "32 or 64" (pointer-sized), or exact.
212 let from_nbits_str = if arch_dependent {"64".to_owned()}
213 else if is_isize_or_usize(cast_from) {"32 or 64".to_owned()}
214 else {int_ty_to_nbits(cast_from).to_string()};
215 span_lint(cx, CAST_PRECISION_LOSS, expr.span,
216 &format!("casting {0} to {1} causes a loss of precision {2}\
217 ({0} is {3} bits wide, but {1}'s mantissa is only {4} bits wide)",
218 cast_from, if cast_to_f64 {"f64"} else {"f32"},
219 if arch_dependent {arch_dependent_str} else {""},
220 from_nbits_str, mantissa_nbits));
// NOTE(review): the function's closing brace is elided from this chunk.
// Decides whether an int-to-int cast may truncate and/or wrap, taking the
// pointer-sized types into account, and emits CAST_POSSIBLE_TRUNCATION /
// CAST_POSSIBLE_WRAP with an architecture suffix where the answer depends
// on the target's pointer width.
227 fn check_truncation_and_wrapping(cx: &LateContext, expr: &Expr, cast_from: &ty::TyS, cast_to: &ty::TyS) {
228 let arch_64_suffix = " on targets with 64-bit wide pointers";
229 let arch_32_suffix = " on targets with 32-bit wide pointers";
// unsigned -> signed of the same width reinterprets the sign bit.
230 let cast_unsigned_to_signed = !cast_from.is_signed() && cast_to.is_signed();
231 let (from_nbits, to_nbits) = (int_ty_to_nbits(cast_from), int_ty_to_nbits(cast_to));
// For each (pointer-sized?, pointer-sized?) combination compute:
// (may truncate, truncation arch suffix, may wrap, wrap arch suffix).
232 let (span_truncation, suffix_truncation, span_wrap, suffix_wrap) =
233 match (is_isize_or_usize(cast_from), is_isize_or_usize(cast_to)) {
// Both (or neither) pointer-sized: widths are directly comparable.
234 (true, true) | (false, false) => (
235 to_nbits < from_nbits,
// NOTE(review): an intervening line (presumably `ArchSuffix::None,`)
// is elided here — this tuple is incomplete in this chunk.
237 to_nbits == from_nbits && cast_unsigned_to_signed,
// NOTE(review): several lines of the remaining tuple elements and the
// next match arm heads are elided from this chunk — the fragments
// below belong to the pointer-sized-source / pointer-sized-target
// arms; confirm against the full file.
242 if to_nbits == 32 {ArchSuffix::_64} else {ArchSuffix::None},
243 to_nbits <= 32 && cast_unsigned_to_signed,
249 cast_unsigned_to_signed,
250 if from_nbits == 64 {ArchSuffix::_64} else {ArchSuffix::_32}
// Emit the truncation lint with the computed arch suffix.
254 span_lint(cx, CAST_POSSIBLE_TRUNCATION, expr.span,
255 &format!("casting {} to {} may truncate the value{}",
// NOTE(review): the `cast_from, cast_to,` argument line and the
// guarding `if span_truncation` are elided — confirm.
257 match suffix_truncation {
258 ArchSuffix::_32 => arch_32_suffix,
259 ArchSuffix::_64 => arch_64_suffix,
260 ArchSuffix::None => "" }));
// Emit the wrap lint with its arch suffix (guard lines elided as above).
263 span_lint(cx, CAST_POSSIBLE_WRAP, expr.span,
264 &format!("casting {} to {} may wrap around the value{}",
267 ArchSuffix::_32 => arch_32_suffix,
268 ArchSuffix::_64 => arch_64_suffix,
269 ArchSuffix::None => "" }));
// Registers the four cast lints emitted by CastPass.
273 impl LintPass for CastPass {
274 fn get_lints(&self) -> LintArray {
275 lint_array!(CAST_PRECISION_LOSS,
// NOTE(review): the CAST_SIGN_LOSS entry, the CAST_POSSIBLE_WRAP
// entry, the closing `)` and the impl's braces are elided from this
// chunk — confirm against the full file.
277 CAST_POSSIBLE_TRUNCATION,
// Late pass: inspects every `as` cast between numeric types and dispatches
// to the precision-loss / sign-loss / truncation / wrap checks depending on
// whether source and target are integral or floating-point.
282 impl LateLintPass for CastPass {
283 fn check_expr(&mut self, cx: &LateContext, expr: &Expr) {
284 if let ExprCast(ref ex, _) = expr.node {
285 let (cast_from, cast_to) = (cx.tcx.expr_ty(ex), cx.tcx.expr_ty(expr));
286 if cast_from.is_numeric() && cast_to.is_numeric() && !in_external_macro(cx, expr.span) {
287 match (cast_from.is_integral(), cast_to.is_integral()) {
// NOTE(review): the `(true, false) => {` arm head (int -> float) is
// elided here — the fragment below is that arm's body; confirm.
289 let from_nbits = int_ty_to_nbits(cast_from);
290 let to_nbits = if let ty::TyFloat(TyF32) = cast_to.sty {32} else {64};
// Lint when the source may have more bits than the float's mantissa
// can represent (pointer-sized ints are conservatively included).
291 if is_isize_or_usize(cast_from) || from_nbits >= to_nbits {
292 span_precision_loss_lint(cx, expr, cast_from, to_nbits == 64);
// NOTE(review): closing braces and the `(false, true) => {` arm head
// (float -> int) are elided — the truncation/sign-loss fragment below
// appears to belong to that arm; confirm.
296 span_lint(cx, CAST_POSSIBLE_TRUNCATION, expr.span,
297 &format!("casting {} to {} may truncate the value",
298 cast_from, cast_to));
299 if !cast_to.is_signed() {
300 span_lint(cx, CAST_SIGN_LOSS, expr.span,
301 &format!("casting {} to {} may lose the sign of the value",
302 cast_from, cast_to));
// NOTE(review): the `(true, true) => {` arm head (int -> int) is
// elided — the fragment below is that arm's body; confirm.
306 if cast_from.is_signed() && !cast_to.is_signed() {
307 span_lint(cx, CAST_SIGN_LOSS, expr.span,
308 &format!("casting {} to {} may lose the sign of the value",
309 cast_from, cast_to));
310 check_truncation_and_wrapping(cx, expr, cast_from, cast_to);
327 /// **What it does:** This lint checks for types used in structs, parameters and `let` declarations above a certain complexity threshold. It is `Warn` by default.
329 /// **Why is this bad?** Too complex types make the code less readable. Consider using a `type` definition to simplify them.
331 /// **Known problems:** None
333 /// **Example:** `struct Foo { inner: Rc<Vec<Vec<Box<(u32, u32, u32, u32)>>>> }`
// Declares the TYPE_COMPLEXITY lint (warn-by-default).
334 declare_lint!(pub TYPE_COMPLEXITY, Warn,
335 "usage of very complex types; recommends factoring out parts into `type` definitions");
// Stateless unit struct carrying the type-complexity pass.
337 #[allow(missing_copy_implementations)]
338 pub struct TypeComplexityPass;
// Registers TYPE_COMPLEXITY as the single lint emitted by this pass.
340 impl LintPass for TypeComplexityPass {
341 fn get_lints(&self) -> LintArray {
342 lint_array!(TYPE_COMPLEXITY)
// NOTE(review): closing braces elided from this chunk — confirm.
// Late pass hooks: funnel every place a type can syntactically appear
// (fn signatures, struct fields, statics/consts, trait and impl items,
// `let` declarations) into check_fndecl / check_type below.
346 impl LateLintPass for TypeComplexityPass {
347 fn check_fn(&mut self, cx: &LateContext, _: FnKind, decl: &FnDecl, _: &Block, _: Span, _: NodeId) {
348 check_fndecl(cx, decl);
// NOTE(review): closing braces between the methods of this impl are
// elided throughout this chunk — confirm against the full file.
351 fn check_struct_field(&mut self, cx: &LateContext, field: &StructField) {
352 // enum variants are also struct fields now
353 check_type(cx, &field.node.ty);
356 fn check_item(&mut self, cx: &LateContext, item: &Item) {
// NOTE(review): the `match item.node {` opener is elided — confirm.
358 ItemStatic(ref ty, _, _) |
359 ItemConst(ref ty, _) => check_type(cx, ty),
360 // functions, enums, structs, impls and traits are covered
365 fn check_trait_item(&mut self, cx: &LateContext, item: &TraitItem) {
// NOTE(review): the `match item.node {` opener is elided — confirm.
367 ConstTraitItem(ref ty, _) |
368 TypeTraitItem(_, Some(ref ty)) => check_type(cx, ty),
// Required methods have no body, so check_fn never sees them.
369 MethodTraitItem(MethodSig { ref decl, .. }, None) => check_fndecl(cx, decl),
370 // methods with default impl are covered by check_fn
375 fn check_impl_item(&mut self, cx: &LateContext, item: &ImplItem) {
// NOTE(review): the `match item.node {` opener is elided — confirm.
377 ImplItemKind::Const(ref ty, _) |
378 ImplItemKind::Type(ref ty) => check_type(cx, ty),
379 // methods are covered by check_fn
384 fn check_local(&mut self, cx: &LateContext, local: &Local) {
385 if let Some(ref ty) = local.ty {
// NOTE(review): the body (presumably `check_type(cx, ty);`) and the
// closing braces of this method and impl are elided — confirm.
// Scores every argument type and the (explicit) return type of a fn decl.
391 fn check_fndecl(cx: &LateContext, decl: &FnDecl) {
392 for arg in &decl.inputs {
393 check_type(cx, &arg.ty);
// NOTE(review): the loop's closing brace is elided — confirm.
395 if let Return(ref ty) = decl.output {
// NOTE(review): the body (presumably `check_type(cx, ty);`) and the
// function's closing braces are elided from this chunk — confirm.
// Runs the complexity visitor over one type and lints if the accumulated
// score exceeds the threshold (the comparison line is elided below).
400 fn check_type(cx: &LateContext, ty: &Ty) {
// Skip macro-generated types — not the user's code to simplify.
401 if in_macro(cx, ty.span) { return; }
// Fresh visitor per type: score starts at 0, nesting depth at 1.
403 let mut visitor = TypeComplexityVisitor { score: 0, nest: 1 };
404 visitor.visit_ty(ty);
407 // println!("{:?} --> {}", ty, score);
// NOTE(review): the threshold check guarding this span_lint (orig line
// 408, presumably `if visitor.score > SOME_THRESHOLD {`) is elided —
// confirm against the full file.
409 span_lint(cx, TYPE_COMPLEXITY, ty.span, &format!(
410 "very complex type used. Consider factoring parts into `type` definitions"));
// NOTE(review): closing braces elided from this chunk.
414 /// Walks a type and assigns a complexity score to it.
415 struct TypeComplexityVisitor {
416 /// total complexity score of the type
// NOTE(review): the field declarations themselves (presumably
// `score: usize,` and `nest: usize,`) and the struct's closing brace
// are elided from this chunk — confirm against the full file.
418 /// current nesting level
422 impl<'v> Visitor<'v> for TypeComplexityVisitor {
423 fn visit_ty(&mut self, ty: &'v Ty) {
424 let (add_score, sub_nest) = match ty.node {
425 // _, &x and *x have only small overhead; don't mess with nesting level
428 TyRptr(..) => (1, 0),
430 // the "normal" components of a type: named types, arrays/tuples
434 TyFixedLengthVec(..) => (10 * self.nest, 1),
436 // "Sum" of trait bounds
437 TyObjectSum(..) => (20 * self.nest, 0),
439 // function types and "for<...>" bring a lot of overhead
441 TyPolyTraitRef(..) => (50 * self.nest, 1),
445 self.score += add_score;
446 self.nest += sub_nest;
448 self.nest -= sub_nest;