1 #![deny(rustc::untranslatable_diagnostic)]
2 #![deny(rustc::diagnostic_outside_of_impl)]
4 AtomicOrderingFence, AtomicOrderingLoad, AtomicOrderingStore, InvalidAtomicOrderingDiag,
5 OnlyCastu8ToChar, OverflowingBinHex, OverflowingBinHexSign, OverflowingBinHexSub,
6 OverflowingInt, OverflowingLiteral, OverflowingUInt, RangeEndpointOutOfRange,
7 UnusedComparisons, VariantSizeDifferencesDiag,
9 use crate::{LateContext, LateLintPass, LintContext};
11 use rustc_attr as attr;
12 use rustc_data_structures::fx::FxHashSet;
13 use rustc_errors::{fluent, DiagnosticMessage};
15 use rustc_hir::{is_range_literal, Expr, ExprKind, Node};
16 use rustc_middle::ty::layout::{IntegerExt, LayoutOf, SizeSkeleton};
17 use rustc_middle::ty::subst::SubstsRef;
18 use rustc_middle::ty::{self, AdtKind, DefIdTree, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable};
19 use rustc_span::source_map;
20 use rustc_span::symbol::sym;
21 use rustc_span::{Span, Symbol};
22 use rustc_target::abi::{Abi, Size, WrappingRange};
23 use rustc_target::abi::{Integer, TagEncoding, Variants};
24 use rustc_target::spec::abi::Abi as SpecAbi;
27 use std::ops::ControlFlow;
30 /// The `unused_comparisons` lint detects comparisons made useless by
31 /// limits of the types involved.
45 /// A useless comparison may indicate a mistake, and should be fixed or
49 "comparisons made useless by limits of the types involved"
53 /// The `overflowing_literals` lint detects literal out of range for its
58 /// ```rust,compile_fail
66 /// It is usually a mistake to use a literal that overflows the type where
67 /// it is used. Either use a literal that is within range, or change the
68 /// type to be within the range of the literal.
71 "literal out of range for its type"
75 /// The `variant_size_differences` lint detects enums with widely varying
80 /// ```rust,compile_fail
81 /// #![deny(variant_size_differences)]
92 /// It can be a mistake to add a variant to an enum that is much larger
93 /// than the other variants, bloating the overall size required for all
94 /// variants. This can impact performance and memory usage. This is
95 /// triggered if one variant is more than 3 times larger than the
96 /// second-largest variant.
98 /// Consider placing the large variant's contents on the heap (for example
99 /// via [`Box`]) to keep the overall size of the enum itself down.
101 /// This lint is "allow" by default because it can be noisy, and may not be
102 /// an actual problem. Decisions about this should be guided with
103 /// profiling and benchmarking.
105 /// [`Box`]: https://doc.rust-lang.org/std/boxed/index.html
106 VARIANT_SIZE_DIFFERENCES,
108 "detects enums with widely varying variant sizes"
// Per-pass state shared by the UNUSED_COMPARISONS and OVERFLOWING_LITERALS lints.
111 #[derive(Copy, Clone)]
112 pub struct TypeLimits {
113 /// Id of the last visited negated expression
114 negated_expr_id: Option<hir::HirId>,
117 impl_lint_pass!(TypeLimits => [UNUSED_COMPARISONS, OVERFLOWING_LITERALS]);
/// Creates the pass with no negation context recorded yet.
120 pub fn new() -> TypeLimits {
121 TypeLimits { negated_expr_id: None }
125 /// Attempts to special-case the overflowing literal lint when it occurs as a range endpoint (`expr..MAX+1`).
126 /// Returns `true` iff the lint was emitted.
127 fn lint_overflowing_range_endpoint<'tcx>(
128 cx: &LateContext<'tcx>,
132 expr: &'tcx hir::Expr<'tcx>,
135 // We only want to handle exclusive (`..`) ranges,
136 // which are represented as `ExprKind::Struct`.
137 let par_id = cx.tcx.hir().parent_id(expr.hir_id);
138 let Node::ExprField(field) = cx.tcx.hir().get(par_id) else { return false };
139 let Node::Expr(struct_expr) = cx.tcx.hir().get_parent(field.hir_id) else { return false };
140 if !is_range_literal(struct_expr) {
143 let ExprKind::Struct(_, eps, _) = &struct_expr.kind else { return false };
148 // We can suggest using an inclusive range
149 // (`..=`) instead only if it is the `end` that is
150 // overflowing and only by 1.
151 if !(eps[1].expr.hir_id == expr.hir_id && lit_val - 1 == max) {
154 let Ok(start) = cx.sess().source_map().span_to_snippet(eps[0].span) else { return false };
156 use rustc_ast::{LitIntType, LitKind};
// Preserve the literal's original type suffix (if any) in the suggestion text.
157 let suffix = match lit.node {
158 LitKind::Int(_, LitIntType::Signed(s)) => s.name_str(),
159 LitKind::Int(_, LitIntType::Unsigned(s)) => s.name_str(),
160 LitKind::Int(_, LitIntType::Unsuffixed) => "",
163 cx.emit_spanned_lint(
164 OVERFLOWING_LITERALS,
166 RangeEndpointOutOfRange {
168 suggestion: struct_expr.span,
170 literal: lit_val - 1,
175 // We've just emitted a lint, special cased for `(...)..MAX+1` ranges,
176 // return `true` so the callers don't also emit a lint
180 // For `isize` & `usize`, be conservative with the warnings, so that the
181 // warnings are consistent between 32- and 64-bit platforms.
// Returns the (min, max) inclusive value range for the given signed integer type.
182 fn int_ty_range(int_ty: ty::IntTy) -> (i128, i128) {
184 ty::IntTy::Isize => (i64::MIN.into(), i64::MAX.into()),
185 ty::IntTy::I8 => (i8::MIN.into(), i8::MAX.into()),
186 ty::IntTy::I16 => (i16::MIN.into(), i16::MAX.into()),
187 ty::IntTy::I32 => (i32::MIN.into(), i32::MAX.into()),
188 ty::IntTy::I64 => (i64::MIN.into(), i64::MAX.into()),
189 ty::IntTy::I128 => (i128::MIN, i128::MAX),
// Returns the (min, max) inclusive value range for the given unsigned integer
// type. Like `int_ty_range`, `usize` is capped at the 64-bit maximum so the
// lint behaves identically on 32- and 64-bit hosts.
193 fn uint_ty_range(uint_ty: ty::UintTy) -> (u128, u128) {
194 let max = match uint_ty {
195 ty::UintTy::Usize => u64::MAX.into(),
196 ty::UintTy::U8 => u8::MAX.into(),
197 ty::UintTy::U16 => u16::MAX.into(),
198 ty::UintTy::U32 => u32::MAX.into(),
199 ty::UintTy::U64 => u64::MAX.into(),
200 ty::UintTy::U128 => u128::MAX,
// Returns the source snippet of `lit` if it is written in binary (`0b...`) or
// hexadecimal (`0x...`) notation; otherwise `None` (including when the snippet
// itself is unavailable).
205 fn get_bin_hex_repr(cx: &LateContext<'_>, lit: &hir::Lit) -> Option<String> {
206 let src = cx.sess().source_map().span_to_snippet(lit.span).ok()?;
207 let firstch = src.chars().next()?;
210 match src.chars().nth(1) {
211 Some('x' | 'b') => return Some(src),
// Emits the OVERFLOWING_LITERALS lint for a bin/hex literal, reporting the
// value the literal *actually* takes after sign-extension/truncation to the
// target type, plus an optional suggestion for a wider type that would fit.
219 fn report_bin_hex_error(
220 cx: &LateContext<'_>,
221 expr: &hir::Expr<'_>,
228 let (t, actually) = match ty {
229 attr::IntType::SignedInt(t) => {
// For signed types the bit pattern is sign-extended to show the wrapped value.
230 let actually = if negative {
231 -(size.sign_extend(val) as i128)
233 size.sign_extend(val) as i128
235 (t.name_str(), actually.to_string())
237 attr::IntType::UnsignedInt(t) => {
238 let actually = size.truncate(val);
239 (t.name_str(), actually.to_string())
243 if negative { OverflowingBinHexSign::Negative } else { OverflowingBinHexSign::Positive };
244 let sub = get_type_suggestion(cx.typeck_results().node_type(expr.hir_id), val, negative).map(
// If the literal carries an `i`/`u` suffix, suggest replacing just the suffix;
// otherwise fall back to a plain help note.
246 if let Some(pos) = repr_str.chars().position(|c| c == 'i' || c == 'u') {
247 let (sans_suffix, _) = repr_str.split_at(pos);
248 OverflowingBinHexSub::Suggestion { span: expr.span, suggestion_ty, sans_suffix }
250 OverflowingBinHexSub::Help { suggestion_ty }
254 cx.emit_spanned_lint(
255 OVERFLOWING_LITERALS,
257 OverflowingBinHex { ty: t, lit: repr_str.clone(), dec: val, actually, sign, sub },
261 // This function finds the next fitting type and generates a suggestion string.
262 // It searches for fitting types in the following way (`X < Y`):
263 // - `iX`: if literal fits in `uX` => `uX`, else => `iY`
267 // No suggestion for: `isize`, `usize`.
268 fn get_type_suggestion(t: Ty<'_>, val: u128, negative: bool) -> Option<&'static str> {
// Macro that, for one starting type, tries each candidate unsigned type
// (only when the literal is non-negative) and then each candidate signed type.
271 macro_rules! find_fit {
272 ($ty:expr, $val:expr, $negative:expr,
273 $($type:ident => [$($utypes:expr),*] => [$($itypes:expr),*]),+) => {
// `_neg` widens the acceptable magnitude by 1 for negative literals,
// since e.g. -128 fits in i8 even though 128 does not.
275 let _neg = if negative { 1 } else { 0 };
278 $(if !negative && val <= uint_ty_range($utypes).1 {
279 return Some($utypes.name_str())
281 $(if val <= int_ty_range($itypes).1 as u128 + _neg {
282 return Some($itypes.name_str())
292 ty::Int(i) => find_fit!(i, val, negative,
293 I8 => [U8] => [I16, I32, I64, I128],
294 I16 => [U16] => [I32, I64, I128],
295 I32 => [U32] => [I64, I128],
296 I64 => [U64] => [I128],
297 I128 => [U128] => []),
298 ty::Uint(u) => find_fit!(u, val, negative,
299 U8 => [U8, U16, U32, U64, U128] => [],
300 U16 => [U16, U32, U64, U128] => [],
301 U32 => [U32, U64, U128] => [],
302 U64 => [U64, U128] => [],
303 U128 => [U128] => []),
// Checks a signed integer literal against the range of its inferred type and
// emits OVERFLOWING_LITERALS when it does not fit. Negated literals
// (`-<lit>`) are recognized via `type_limits.negated_expr_id`.
308 fn lint_int_literal<'tcx>(
309 cx: &LateContext<'tcx>,
310 type_limits: &TypeLimits,
311 e: &'tcx hir::Expr<'tcx>,
316 let int_type = t.normalize(cx.sess().target.pointer_width);
317 let (min, max) = int_ty_range(int_type);
318 let max = max as u128;
319 let negative = type_limits.negated_expr_id == Some(e.hir_id);
321 // Detect literal value out of range [min, max] inclusive
322 // avoiding use of -min to prevent overflow/panic
323 if (negative && v > max + 1) || (!negative && v > max) {
// Bin/hex literals get a dedicated diagnostic showing the wrapped value.
324 if let Some(repr_str) = get_bin_hex_repr(cx, lit) {
325 report_bin_hex_error(
328 attr::IntType::SignedInt(ty::ast_int_ty(t)),
329 Integer::from_int_ty(cx, t).size(),
337 if lint_overflowing_range_endpoint(cx, lit, v, max, e, t.name_str()) {
338 // The overflowing literal lint was emitted by `lint_overflowing_range_endpoint`.
342 cx.emit_spanned_lint(
343 OVERFLOWING_LITERALS,
350 .span_to_snippet(lit.span)
351 .expect("must get snippet from literal"),
354 suggestion_ty: get_type_suggestion(
355 cx.typeck_results().node_type(e.hir_id),
// Checks an unsigned integer literal against the range of its inferred type
// and emits OVERFLOWING_LITERALS when out of range, with special handling for
// `as char` casts and range endpoints.
364 fn lint_uint_literal<'tcx>(
365 cx: &LateContext<'tcx>,
366 e: &'tcx hir::Expr<'tcx>,
370 let uint_type = t.normalize(cx.sess().target.pointer_width);
371 let (min, max) = uint_ty_range(uint_type);
372 let lit_val: u128 = match lit.node {
373 // _v is u8, within range by definition
374 ast::LitKind::Byte(_v) => return,
375 ast::LitKind::Int(v, _) => v,
378 if lit_val < min || lit_val > max {
379 let parent_id = cx.tcx.hir().parent_id(e.hir_id);
380 if let Node::Expr(par_e) = cx.tcx.hir().get(parent_id) {
// A `<lit> as char` cast gets a tailored message: only u8 casts to char.
382 hir::ExprKind::Cast(..) => {
383 if let ty::Char = cx.typeck_results().expr_ty(par_e).kind() {
384 cx.emit_spanned_lint(
385 OVERFLOWING_LITERALS,
387 OnlyCastu8ToChar { span: par_e.span, literal: lit_val },
395 if lint_overflowing_range_endpoint(cx, lit, lit_val, max, e, t.name_str()) {
396 // The overflowing literal lint was emitted by `lint_overflowing_range_endpoint`.
399 if let Some(repr_str) = get_bin_hex_repr(cx, lit) {
400 report_bin_hex_error(
403 attr::IntType::UnsignedInt(ty::ast_uint_ty(t)),
404 Integer::from_uint_ty(cx, t).size(),
411 cx.emit_spanned_lint(
412 OVERFLOWING_LITERALS,
419 .span_to_snippet(lit.span)
420 .expect("must get snippet from literal"),
// Dispatches literal-overflow checking by the literal's inferred type:
// signed ints, unsigned ints, and floats (where only infinity-after-parse
// is reported).
428 fn lint_literal<'tcx>(
429 cx: &LateContext<'tcx>,
430 type_limits: &TypeLimits,
431 e: &'tcx hir::Expr<'tcx>,
434 match *cx.typeck_results().node_type(e.hir_id).kind() {
437 ast::LitKind::Int(v, ast::LitIntType::Signed(_) | ast::LitIntType::Unsuffixed) => {
438 lint_int_literal(cx, type_limits, e, lit, t, v)
443 ty::Uint(t) => lint_uint_literal(cx, e, lit, t),
// A float literal overflows iff parsing it at the target precision
// yields an infinite value.
445 let is_infinite = match lit.node {
446 ast::LitKind::Float(v, _) => match t {
447 ty::FloatTy::F32 => v.as_str().parse().map(f32::is_infinite),
448 ty::FloatTy::F64 => v.as_str().parse().map(f64::is_infinite),
452 if is_infinite == Ok(true) {
453 cx.emit_spanned_lint(
454 OVERFLOWING_LITERALS,
461 .span_to_snippet(lit.span)
462 .expect("must get snippet from literal"),
471 impl<'tcx> LateLintPass<'tcx> for TypeLimits {
472 fn check_expr(&mut self, cx: &LateContext<'tcx>, e: &'tcx hir::Expr<'tcx>) {
474 hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => {
475 // propagate negation, if the negation itself isn't negated
476 if self.negated_expr_id != Some(e.hir_id) {
477 self.negated_expr_id = Some(expr.hir_id);
480 hir::ExprKind::Binary(binop, ref l, ref r) => {
// A comparison that can never vary (per the operand types' limits)
// is reported as UNUSED_COMPARISONS.
481 if is_comparison(binop) && !check_limits(cx, binop, &l, &r) {
482 cx.emit_spanned_lint(UNUSED_COMPARISONS, e.span, UnusedComparisons);
485 hir::ExprKind::Lit(ref lit) => lint_literal(cx, self, e, lit),
// Returns whether comparing `v` (a literal) against a value of a type whose
// range is [min, max] can ever be non-constant — e.g. `x < min` is always
// false, so `Lt` requires `v > min`.
489 fn is_valid<T: PartialOrd>(binop: hir::BinOp, v: T, min: T, max: T) -> bool {
491 hir::BinOpKind::Lt => v > min && v <= max,
492 hir::BinOpKind::Le => v >= min && v < max,
493 hir::BinOpKind::Gt => v >= min && v < max,
494 hir::BinOpKind::Ge => v > min && v <= max,
495 hir::BinOpKind::Eq | hir::BinOpKind::Ne => v >= min && v <= max,
// Mirrors a comparison operator so its operands can be swapped
// (`a < b` becomes `b > a`); used to normalize the literal to the RHS.
500 fn rev_binop(binop: hir::BinOp) -> hir::BinOp {
504 hir::BinOpKind::Lt => hir::BinOpKind::Gt,
505 hir::BinOpKind::Le => hir::BinOpKind::Ge,
506 hir::BinOpKind::Gt => hir::BinOpKind::Lt,
507 hir::BinOpKind::Ge => hir::BinOpKind::Le,
514 cx: &LateContext<'_>,
// Identify which operand is the literal; `swap` records whether the
// literal was on the LHS so the operator can be mirrored below.
519 let (lit, expr, swap) = match (&l.kind, &r.kind) {
520 (&hir::ExprKind::Lit(_), _) => (l, r, true),
521 (_, &hir::ExprKind::Lit(_)) => (r, l, false),
524 // Normalize the binop so that the literal is always on the RHS in
526 let norm_binop = if swap { rev_binop(binop) } else { binop };
527 match *cx.typeck_results().node_type(expr.hir_id).kind() {
529 let (min, max) = int_ty_range(int_ty);
530 let lit_val: i128 = match lit.kind {
531 hir::ExprKind::Lit(ref li) => match li.node {
534 ast::LitIntType::Signed(_) | ast::LitIntType::Unsuffixed,
540 is_valid(norm_binop, lit_val, min, max)
542 ty::Uint(uint_ty) => {
543 let (min, max): (u128, u128) = uint_ty_range(uint_ty);
544 let lit_val: u128 = match lit.kind {
545 hir::ExprKind::Lit(ref li) => match li.node {
546 ast::LitKind::Int(v, _) => v,
551 is_valid(norm_binop, lit_val, min, max)
// Returns whether `binop` is a comparison operator (body elided in this excerpt).
557 fn is_comparison(binop: hir::BinOp) -> bool {
572 /// The `improper_ctypes` lint detects incorrect use of types in foreign
579 /// static STATIC: String;
587 /// The compiler has several checks to verify that types used in `extern`
588 /// blocks are safe and follow certain rules to ensure proper
589 /// compatibility with the foreign interfaces. This lint is issued when it
590 /// detects a probable mistake in a definition. The lint usually should
591 /// provide a description of the issue, along with possibly a hint on how
595 "proper use of libc types in foreign modules"
598 declare_lint_pass!(ImproperCTypesDeclarations => [IMPROPER_CTYPES]);
601 /// The `improper_ctypes_definitions` lint detects incorrect use of
602 /// [`extern` function] definitions.
604 /// [`extern` function]: https://doc.rust-lang.org/reference/items/functions.html#extern-function-qualifier
609 /// # #![allow(unused)]
610 /// pub extern "C" fn str_type(p: &str) { }
617 /// There are many parameter and return types that may be specified in an
618 /// `extern` function that are not compatible with the given ABI. This
619 /// lint is an alert that these types should not be used. The lint usually
620 /// should provide a description of the issue, along with possibly a hint
621 /// on how to resolve it.
622 IMPROPER_CTYPES_DEFINITIONS,
624 "proper use of libc types in foreign item definitions"
627 declare_lint_pass!(ImproperCTypesDefinitions => [IMPROPER_CTYPES_DEFINITIONS]);
// Whether the improper-ctypes check is running on an extern-block declaration
// (IMPROPER_CTYPES) or an `extern "C"` Rust definition (IMPROPER_CTYPES_DEFINITIONS).
629 #[derive(Clone, Copy)]
630 pub(crate) enum CItemKind {
// Visitor carrying the lint context and the declaration/definition mode.
635 struct ImproperCTypesVisitor<'a, 'tcx> {
636 cx: &'a LateContext<'tcx>,
// Outcome of an FFI-safety check: safe, PhantomData-only, or unsafe with a
// diagnostic reason and optional help message.
640 enum FfiResult<'tcx> {
642 FfiPhantom(Ty<'tcx>),
643 FfiUnsafe { ty: Ty<'tcx>, reason: DiagnosticMessage, help: Option<DiagnosticMessage> },
// Whether the ADT is marked `#[rustc_nonnull_optimization_guaranteed]`
// (e.g. `NonZero*` / `NonNull` in core).
646 pub(crate) fn nonnull_optimization_guaranteed<'tcx>(
648 def: ty::AdtDef<'tcx>,
650 tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed)
653 /// `repr(transparent)` structs can have a single non-ZST field, this function returns that
655 pub fn transparent_newtype_field<'a, 'tcx>(
657 variant: &'a ty::VariantDef,
658 ) -> Option<&'a ty::FieldDef> {
659 let param_env = tcx.param_env(variant.def_id);
// Find the first field whose layout is not zero-sized; layout errors are
// conservatively treated as non-ZST (`map_or(false, ...)`).
660 variant.fields.iter().find(|field| {
661 let field_ty = tcx.type_of(field.did);
662 let is_zst = tcx.layout_of(param_env.and(field_ty)).map_or(false, |layout| layout.is_zst());
667 /// Is type known to be non-null?
668 fn ty_is_known_nonnull<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, mode: CItemKind) -> bool {
671 ty::FnPtr(_) => true,
// `Box` is only treated as non-null for definitions, not declarations.
673 ty::Adt(def, _) if def.is_box() && matches!(mode, CItemKind::Definition) => true,
674 ty::Adt(def, substs) if def.repr().transparent() && !def.is_union() => {
675 let marked_non_null = nonnull_optimization_guaranteed(tcx, *def);
681 // `UnsafeCell` has its niche hidden.
682 if def.is_unsafe_cell() {
// A transparent wrapper is non-null iff its single non-ZST field is.
688 .filter_map(|variant| transparent_newtype_field(cx.tcx, variant))
689 .any(|field| ty_is_known_nonnull(cx, field.ty(tcx, substs), mode))
695 /// Given a non-null scalar (or transparent) type `ty`, return the nullable version of that type.
696 /// If the type passed in was not scalar, returns None.
697 fn get_nullable_type<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
699 Some(match *ty.kind() {
// For a transparent ADT, recurse into its single non-ZST field; the
// debug assertion below (partially elided) checks that exactly one
// such field exists.
700 ty::Adt(field_def, field_substs) => {
701 let inner_field_ty = {
702 let mut first_non_zst_ty = field_def
705 .filter_map(|v| transparent_newtype_field(cx.tcx, v));
707 first_non_zst_ty.clone().count(),
709 "Wrong number of fields for transparent type"
713 .expect("No non-zst fields in transparent type.")
714 .ty(tcx, field_substs)
716 return get_nullable_type(cx, inner_field_ty);
718 ty::Int(ty) => tcx.mk_mach_int(ty),
719 ty::Uint(ty) => tcx.mk_mach_uint(ty),
720 ty::RawPtr(ty_mut) => tcx.mk_ptr(ty_mut),
721 // As these types are always non-null, the nullable equivalent of
722 // Option<T> of these types are their raw pointer counterparts.
723 ty::Ref(_region, ty, mutbl) => tcx.mk_ptr(ty::TypeAndMut { ty, mutbl }),
725 // There is no nullable equivalent for Rust's function pointers -- you
726 // must use an Option<fn(..) -> _> to represent it.
730 // We should only ever reach this case if ty_is_known_nonnull is extended
734 "get_nullable_type: Unhandled scalar kind: {:?} while checking {:?}",
742 /// Check if this enum can be safely exported based on the "nullable pointer optimization". If it
743 /// can, return the type that `ty` can be safely converted to, otherwise return `None`.
744 /// Currently restricted to function pointers, boxes, references, `core::num::NonZero*`,
745 /// `core::ptr::NonNull`, and `#[repr(transparent)]` newtypes.
746 /// FIXME: This duplicates code in codegen.
747 pub(crate) fn repr_nullable_ptr<'tcx>(
748 cx: &LateContext<'tcx>,
751 ) -> Option<Ty<'tcx>> {
752 debug!("is_repr_nullable_ptr(cx, ty = {:?})", ty);
753 if let ty::Adt(ty_def, substs) = ty.kind() {
// Option-like shape: exactly two variants, one fieldless and one with a
// single field.
754 let field_ty = match &ty_def.variants().raw[..] {
755 [var_one, var_two] => match (&var_one.fields[..], &var_two.fields[..]) {
756 ([], [field]) | ([field], []) => field.ty(cx.tcx, substs),
762 if !ty_is_known_nonnull(cx, field_ty, ckind) {
766 // At this point, the field's type is known to be nonnull and the parent enum is Option-like.
767 // If the computed size for the field and the enum are different, the nonnull optimization isn't
768 // being applied (and we've got a problem somewhere).
769 let compute_size_skeleton = |t| SizeSkeleton::compute(t, cx.tcx, cx.param_env).unwrap();
770 if !compute_size_skeleton(ty).same_size(compute_size_skeleton(field_ty)) {
771 bug!("improper_ctypes: Option nonnull optimization not applied?");
774 // Return the nullable type this Option-like enum can be safely represented with.
775 let field_ty_abi = &cx.layout_of(field_ty).unwrap().abi;
776 if let Abi::Scalar(field_ty_scalar) = field_ty_abi {
777 match field_ty_scalar.valid_range(cx) {
// Valid range excludes only the all-ones value: niche is at the top.
778 WrappingRange { start: 0, end }
779 if end == field_ty_scalar.size(&cx.tcx).unsigned_int_max() - 1 =>
781 return Some(get_nullable_type(cx, field_ty).unwrap());
// Valid range excludes zero: the classic null-pointer niche.
783 WrappingRange { start: 1, .. } => {
784 return Some(get_nullable_type(cx, field_ty).unwrap());
786 WrappingRange { start, end } => {
787 unreachable!("Unhandled start and end range: ({}, {})", start, end)
795 impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
796 /// Check if the type is array and emit an unsafe type lint.
// Returns true when a lint was emitted (caller should stop checking).
797 fn check_for_array_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool {
798 if let ty::Array(..) = ty.kind() {
799 self.emit_ffi_unsafe_type_lint(
802 fluent::lint_improper_ctypes_array_reason,
803 Some(fluent::lint_improper_ctypes_array_help),
811 /// Checks if the given field's type is "ffi-safe".
812 fn check_field_type_for_ffi(
814 cache: &mut FxHashSet<Ty<'tcx>>,
815 field: &ty::FieldDef,
816 substs: SubstsRef<'tcx>,
817 ) -> FfiResult<'tcx> {
818 let field_ty = field.ty(self.cx.tcx, substs);
// Opaque types cannot be normalized (normalization would ICE), so they
// are checked as-is; everything else is normalized first.
819 if field_ty.has_opaque_types() {
820 self.check_type_for_ffi(cache, field_ty)
822 let field_ty = self.cx.tcx.normalize_erasing_regions(self.cx.param_env, field_ty);
823 self.check_type_for_ffi(cache, field_ty)
827 /// Checks if the given `VariantDef`'s field types are "ffi-safe".
828 fn check_variant_for_ffi(
830 cache: &mut FxHashSet<Ty<'tcx>>,
832 def: ty::AdtDef<'tcx>,
833 variant: &ty::VariantDef,
834 substs: SubstsRef<'tcx>,
835 ) -> FfiResult<'tcx> {
838 if def.repr().transparent() {
839 // Can assume that at most one field is not a ZST, so only check
840 // that field's type for FFI-safety.
841 if let Some(field) = transparent_newtype_field(self.cx.tcx, variant) {
842 self.check_field_type_for_ffi(cache, field, substs)
844 // All fields are ZSTs; this means that the type should behave
845 // like (), which is FFI-unsafe
846 FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_struct_zst, help: None }
849 // We can't completely trust repr(C) markings; make sure the fields are
// An empty-field variant starts with all_phantom == false so it is not
// misreported as PhantomData-only.
851 let mut all_phantom = !variant.fields.is_empty();
852 for field in &variant.fields {
853 match self.check_field_type_for_ffi(cache, &field, substs) {
857 FfiPhantom(..) if def.is_enum() => {
860 reason: fluent::lint_improper_ctypes_enum_phantomdata,
869 if all_phantom { FfiPhantom(ty) } else { FfiSafe }
873 /// Checks if the given type is "ffi-safe" (has a stable, well-defined
874 /// representation which can be exported to C code).
875 fn check_type_for_ffi(&self, cache: &mut FxHashSet<Ty<'tcx>>, ty: Ty<'tcx>) -> FfiResult<'tcx> {
878 let tcx = self.cx.tcx;
880 // Protect against infinite recursion, for example
881 // `struct S(*mut S);`.
882 // FIXME: A recursion limit is necessary as well, for irregular
884 if !cache.insert(ty) {
889 ty::Adt(def, substs) => {
// In definitions (not declarations), a sized Box is treated like a
// non-null pointer and accepted.
890 if def.is_box() && matches!(self.mode, CItemKind::Definition) {
891 if ty.boxed_ty().is_sized(tcx, self.cx.param_env) {
896 reason: fluent::lint_improper_ctypes_box,
901 if def.is_phantom_data() {
902 return FfiPhantom(ty);
904 match def.adt_kind() {
905 AdtKind::Struct | AdtKind::Union => {
// Structs/unions must be repr(C) or repr(transparent).
906 if !def.repr().c() && !def.repr().transparent() {
909 reason: if def.is_struct() {
910 fluent::lint_improper_ctypes_struct_layout_reason
912 fluent::lint_improper_ctypes_union_layout_reason
914 help: if def.is_struct() {
915 Some(fluent::lint_improper_ctypes_struct_layout_help)
917 Some(fluent::lint_improper_ctypes_union_layout_help)
922 let is_non_exhaustive =
923 def.non_enum_variant().is_field_list_non_exhaustive();
924 if is_non_exhaustive && !def.did().is_local() {
927 reason: if def.is_struct() {
928 fluent::lint_improper_ctypes_struct_non_exhaustive
930 fluent::lint_improper_ctypes_union_non_exhaustive
936 if def.non_enum_variant().fields.is_empty() {
939 reason: if def.is_struct() {
940 fluent::lint_improper_ctypes_struct_fieldless_reason
942 fluent::lint_improper_ctypes_union_fieldless_reason
944 help: if def.is_struct() {
945 Some(fluent::lint_improper_ctypes_struct_fieldless_help)
947 Some(fluent::lint_improper_ctypes_union_fieldless_help)
952 self.check_variant_for_ffi(cache, ty, def, def.non_enum_variant(), substs)
955 if def.variants().is_empty() {
956 // Empty enums are okay... although sort of useless.
960 // Check for a repr() attribute to specify the size of the
962 if !def.repr().c() && !def.repr().transparent() && def.repr().int.is_none()
964 // Special-case types like `Option<extern fn()>`.
965 if repr_nullable_ptr(self.cx, ty, self.mode).is_none() {
968 reason: fluent::lint_improper_ctypes_enum_repr_reason,
969 help: Some(fluent::lint_improper_ctypes_enum_repr_help),
974 if def.is_variant_list_non_exhaustive() && !def.did().is_local() {
977 reason: fluent::lint_improper_ctypes_non_exhaustive,
982 // Check the contained variants.
983 for variant in def.variants() {
984 let is_non_exhaustive = variant.is_field_list_non_exhaustive();
985 if is_non_exhaustive && !variant.def_id.is_local() {
988 reason: fluent::lint_improper_ctypes_non_exhaustive_variant,
993 match self.check_variant_for_ffi(cache, ty, def, variant, substs) {
1004 ty::Char => FfiUnsafe {
1006 reason: fluent::lint_improper_ctypes_char_reason,
1007 help: Some(fluent::lint_improper_ctypes_char_help),
1010 ty::Int(ty::IntTy::I128) | ty::Uint(ty::UintTy::U128) => {
1011 FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_128bit, help: None }
1014 // Primitive types with a stable representation.
1015 ty::Bool | ty::Int(..) | ty::Uint(..) | ty::Float(..) | ty::Never => FfiSafe,
1017 ty::Slice(_) => FfiUnsafe {
1019 reason: fluent::lint_improper_ctypes_slice_reason,
1020 help: Some(fluent::lint_improper_ctypes_slice_help),
1023 ty::Dynamic(..) => {
1024 FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_dyn, help: None }
1027 ty::Str => FfiUnsafe {
1029 reason: fluent::lint_improper_ctypes_str_reason,
1030 help: Some(fluent::lint_improper_ctypes_str_help),
1033 ty::Tuple(..) => FfiUnsafe {
1035 reason: fluent::lint_improper_ctypes_tuple_reason,
1036 help: Some(fluent::lint_improper_ctypes_tuple_help),
// In definitions, pointers/references to sized types are accepted
// without recursing (guard partially elided in this excerpt).
1039 ty::RawPtr(ty::TypeAndMut { ty, .. }) | ty::Ref(_, ty, _)
1041 matches!(self.mode, CItemKind::Definition)
1042 && ty.is_sized(self.cx.tcx, self.cx.param_env)
1048 ty::RawPtr(ty::TypeAndMut { ty, .. })
1049 if match ty.kind() {
1050 ty::Tuple(tuple) => tuple.is_empty(),
1057 ty::RawPtr(ty::TypeAndMut { ty, .. }) | ty::Ref(_, ty, _) => {
1058 self.check_type_for_ffi(cache, ty)
1061 ty::Array(inner_ty, _) => self.check_type_for_ffi(cache, inner_ty),
// Function pointers: reject Rust-internal ABIs, then check the
// signature's return and argument types recursively.
1064 if self.is_internal_abi(sig.abi()) {
1067 reason: fluent::lint_improper_ctypes_fnptr_reason,
1068 help: Some(fluent::lint_improper_ctypes_fnptr_help),
1072 let sig = tcx.erase_late_bound_regions(sig);
1073 if !sig.output().is_unit() {
1074 let r = self.check_type_for_ffi(cache, sig.output());
1082 for arg in sig.inputs() {
1083 let r = self.check_type_for_ffi(cache, *arg);
1094 ty::Foreign(..) => FfiSafe,
1096 // While opaque types are checked for earlier, if a projection in a struct field
1097 // normalizes to an opaque type, then it will reach this branch.
1098 ty::Alias(ty::Opaque, ..) => {
1099 FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_opaque, help: None }
1102 // `extern "C" fn` functions can have type parameters, which may or may not be FFI-safe,
1103 // so they are currently ignored for the purposes of this lint.
1104 ty::Param(..) | ty::Alias(ty::Projection, ..)
1105 if matches!(self.mode, CItemKind::Definition) =>
1111 | ty::Alias(ty::Projection, ..)
1117 | ty::GeneratorWitness(..)
1118 | ty::Placeholder(..)
1119 | ty::FnDef(..) => bug!("unexpected type in foreign function: {:?}", ty),
// Emits IMPROPER_CTYPES or IMPROPER_CTYPES_DEFINITIONS (depending on mode)
// for `ty`, with the given reason note and optional help, plus a note
// pointing at the type's definition when it is local.
1123 fn emit_ffi_unsafe_type_lint(
1127 note: DiagnosticMessage,
1128 help: Option<DiagnosticMessage>,
1130 let lint = match self.mode {
1131 CItemKind::Declaration => IMPROPER_CTYPES,
1132 CItemKind::Definition => IMPROPER_CTYPES_DEFINITIONS,
1135 self.cx.struct_span_lint(lint, sp, fluent::lint_improper_ctypes, |lint| {
1136 let item_description = match self.mode {
1137 CItemKind::Declaration => "block",
1138 CItemKind::Definition => "fn",
1140 #[allow(rustc::diagnostic_outside_of_impl)]
1141 let mut diag = lint.build(fluent::lint_improper_ctypes);
1142 diag.set_arg("ty", ty);
1143 diag.set_arg("desc", item_description);
1144 diag.span_label(sp, fluent::label);
1145 if let Some(help) = help {
1149 if let ty::Adt(def, _) = ty.kind() {
1150 if let Some(sp) = self.cx.tcx.hir().span_if_local(def.did()) {
1151 lint.span_note(sp, fluent::note);
// Walks `ty` looking for an opaque (`impl Trait`) type and lints it as
// FFI-unsafe; returns true when a lint was emitted.
1158 fn check_for_opaque_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool {
1159 struct ProhibitOpaqueTypes;
1160 impl<'tcx> ty::visit::TypeVisitor<'tcx> for ProhibitOpaqueTypes {
1161 type BreakTy = Ty<'tcx>;
1163 fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
// Fast path: skip subtrees that contain no opaque types at all.
1164 if !ty.has_opaque_types() {
1165 return ControlFlow::CONTINUE;
1168 if let ty::Alias(ty::Opaque, ..) = ty.kind() {
1169 ControlFlow::Break(ty)
1171 ty.super_visit_with(self)
1176 if let Some(ty) = self
1179 .normalize_erasing_regions(self.cx.param_env, ty)
1180 .visit_with(&mut ProhibitOpaqueTypes)
1183 self.emit_ffi_unsafe_type_lint(ty, sp, fluent::lint_improper_ctypes_opaque, None);
// Top-level entry: checks one type (a parameter, return type, or static)
// and reports any FFI-safety problems found.
1190 fn check_type_for_ffi_and_report_errors(
1195 is_return_type: bool,
1197 // We have to check for opaque types before `normalize_erasing_regions`,
1198 // which will replace opaque types with their underlying concrete type.
1199 if self.check_for_opaque_ty(sp, ty) {
1200 // We've already emitted an error due to an opaque type.
1204 // it is only OK to use this function because extern fns cannot have
1205 // any generic types right now:
1206 let ty = self.cx.tcx.normalize_erasing_regions(self.cx.param_env, ty);
1208 // C doesn't really support passing arrays by value - the only way to pass an array by value
1209 // is through a struct. So, first test that the top level isn't an array, and then
1210 // recursively check the types inside.
1211 if !is_static && self.check_for_array_ty(sp, ty) {
1215 // Don't report FFI errors for unit return types. This check exists here, and not in
1216 // `check_foreign_fn` (where it would make more sense) so that normalization has definitely
1218 if is_return_type && ty.is_unit() {
1222 match self.check_type_for_ffi(&mut FxHashSet::default(), ty) {
1223 FfiResult::FfiSafe => {}
1224 FfiResult::FfiPhantom(ty) => {
1225 self.emit_ffi_unsafe_type_lint(
1228 fluent::lint_improper_ctypes_only_phantomdata,
1232 // If `ty` is a `repr(transparent)` newtype, and the non-zero-sized type is a generic
1233 // argument, which after substitution, is `()`, then this branch can be hit.
1234 FfiResult::FfiUnsafe { ty, .. } if is_return_type && ty.is_unit() => {}
1235 FfiResult::FfiUnsafe { ty, reason, help } => {
1236 self.emit_ffi_unsafe_type_lint(ty, sp, reason, help);
// Checks every parameter type and the (explicit) return type of a foreign
// or extern-ABI function against the FFI-safety rules.
1241 fn check_foreign_fn(&mut self, id: hir::HirId, decl: &hir::FnDecl<'_>) {
1242 let def_id = self.cx.tcx.hir().local_def_id(id);
1243 let sig = self.cx.tcx.fn_sig(def_id);
1244 let sig = self.cx.tcx.erase_late_bound_regions(sig);
1246 for (input_ty, input_hir) in iter::zip(sig.inputs(), decl.inputs) {
1247 self.check_type_for_ffi_and_report_errors(input_hir.span, *input_ty, false, false);
1250 if let hir::FnRetTy::Return(ref ret_hir) = decl.output {
1251 let ret_ty = sig.output();
1252 self.check_type_for_ffi_and_report_errors(ret_hir.span, ret_ty, false, true);
// Checks the type of a foreign `static` item (is_static = true, so the
// array-by-value special case is skipped).
1256 fn check_foreign_static(&mut self, id: hir::HirId, span: Span) {
1257 let def_id = self.cx.tcx.hir().local_def_id(id);
1258 let ty = self.cx.tcx.type_of(def_id);
1259 self.check_type_for_ffi_and_report_errors(span, ty, true, false);
// Rust-internal ABIs are exempt from the improper-ctypes checks.
1262 fn is_internal_abi(&self, abi: SpecAbi) -> bool {
1265 SpecAbi::Rust | SpecAbi::RustCall | SpecAbi::RustIntrinsic | SpecAbi::PlatformIntrinsic
1270 impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDeclarations {
// Entry point: visits every item in an `extern` block and checks fns and
// statics with a non-internal ABI.
1271 fn check_foreign_item(&mut self, cx: &LateContext<'_>, it: &hir::ForeignItem<'_>) {
1272 let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Declaration };
1273 let abi = cx.tcx.hir().get_foreign_abi(it.hir_id());
1275 if !vis.is_internal_abi(abi) {
1277 hir::ForeignItemKind::Fn(ref decl, _, _) => {
1278 vis.check_foreign_fn(it.hir_id(), decl);
1280 hir::ForeignItemKind::Static(ref ty, _) => {
1281 vis.check_foreign_static(it.hir_id(), ty.span);
// Foreign type declarations carry no checkable Rust type.
1283 hir::ForeignItemKind::Type => (),
1289 impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDefinitions {
// Checks Rust-defined functions/methods with a foreign ABI
// (e.g. `extern "C" fn`) in Definition mode.
1292 cx: &LateContext<'tcx>,
1293 kind: hir::intravisit::FnKind<'tcx>,
1294 decl: &'tcx hir::FnDecl<'_>,
1295 _: &'tcx hir::Body<'_>,
1299 use hir::intravisit::FnKind;
1301 let abi = match kind {
1302 FnKind::ItemFn(_, _, header, ..) => header.abi,
1303 FnKind::Method(_, sig, ..) => sig.header.abi,
1307 let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Definition };
1308 if !vis.is_internal_abi(abi) {
1309 vis.check_foreign_fn(hir_id, decl);
1314 declare_lint_pass!(VariantSizeDifferences => [VARIANT_SIZE_DIFFERENCES]);
1316 impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences {
1317 fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
1318 if let hir::ItemKind::Enum(ref enum_definition, _) = it.kind {
1319 let t = cx.tcx.type_of(it.owner_id);
1320 let ty = cx.tcx.erase_regions(t);
1321 let Ok(layout) = cx.layout_of(ty) else { return };
// Only multi-variant enums with a directly-encoded tag are considered;
// niche-encoded layouts are skipped.
1322 let Variants::Multiple {
1323 tag_encoding: TagEncoding::Direct, tag, ref variants, ..
1324 } = &layout.variants else {
1328 let tag_size = tag.size(&cx.tcx).bytes();
1331 "enum `{}` is {} bytes large with layout:\n{:#?}",
1333 layout.size.bytes(),
// Track the largest and second-largest variant payload sizes, plus
// the index of the largest (for the lint's span).
1337 let (largest, slargest, largest_index) = iter::zip(enum_definition.variants, variants)
1338 .map(|(variant, variant_layout)| {
1339 // Subtract the size of the enum tag.
1340 let bytes = variant_layout.size.bytes().saturating_sub(tag_size);
1342 debug!("- variant `{}` is {} bytes large", variant.ident, bytes);
1346 .fold((0, 0, 0), |(l, s, li), (idx, size)| {
1349 } else if size > s {
1356 // We only warn if the largest variant is at least thrice as large as
1357 // the second-largest.
1358 if largest > slargest * 3 && slargest > 0 {
1359 cx.emit_spanned_lint(
1360 VARIANT_SIZE_DIFFERENCES,
1361 enum_definition.variants[largest_index].span,
1362 VariantSizeDifferencesDiag { largest },
1370 /// The `invalid_atomic_ordering` lint detects passing an `Ordering`
1371 /// to an atomic operation that does not support that ordering.
1375 /// ```rust,compile_fail
1376 /// # use core::sync::atomic::{AtomicU8, Ordering};
1377 /// let atom = AtomicU8::new(0);
1378 /// let value = atom.load(Ordering::Release);
1379 /// # let _ = value;
1386 /// Some atomic operations are only supported for a subset of the
1387 /// `atomic::Ordering` variants. Passing an unsupported variant will cause
1388 /// an unconditional panic at runtime, which is detected by this lint.
1390 /// This lint will trigger in the following cases: (where `AtomicType` is an
1391 /// atomic type from `core::sync::atomic`, such as `AtomicBool`,
1392 /// `AtomicPtr`, `AtomicUsize`, or any of the other integer atomics).
1394 /// - Passing `Ordering::Acquire` or `Ordering::AcqRel` to
1395 /// `AtomicType::store`.
1397 /// - Passing `Ordering::Release` or `Ordering::AcqRel` to
1398 /// `AtomicType::load`.
1400 /// - Passing `Ordering::Relaxed` to `core::sync::atomic::fence` or
1401 /// `core::sync::atomic::compiler_fence`.
1403 /// - Passing `Ordering::Release` or `Ordering::AcqRel` as the failure
1404 /// ordering for any of `AtomicType::compare_exchange`,
1405 /// `AtomicType::compare_exchange_weak`, or `AtomicType::fetch_update`.
1406 INVALID_ATOMIC_ORDERING,
1408 "usage of invalid atomic ordering in atomic operations and memory fences"
// Registers the `InvalidAtomicOrdering` pass as the sole emitter of the
// `invalid_atomic_ordering` lint documented above.
1411 declare_lint_pass!(InvalidAtomicOrdering => [INVALID_ATOMIC_ORDERING]);
// Helper methods for the `invalid_atomic_ordering` lint. Each `check_*`
// method inspects one expression shape; `check_expr` (below) calls all three.
1413 impl InvalidAtomicOrdering {
// If `expr` is a method call on one of the std atomic types whose name is in
// `recognized_names`, return the method name and its arguments; else `None`.
1414 fn inherent_atomic_method_call<'hir>(
1415 cx: &LateContext<'_>,
// Checked first as a cheap fast path before any type-dependent lookups.
1417 recognized_names: &[Symbol], // used for fast path calculation
1418 ) -> Option<(Symbol, &'hir [Expr<'hir>])> {
1419 const ATOMIC_TYPES: &[Symbol] = &[
1435 if let ExprKind::MethodCall(ref method_path, _, args, _) = &expr.kind
1436 && recognized_names.contains(&method_path.ident.name)
// Resolve the method to its definition and the inherent impl it lives in.
1437 && let Some(m_def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id)
1438 && let Some(impl_did) = cx.tcx.impl_of_method(m_def_id)
1439 && let Some(adt) = cx.tcx.type_of(impl_did).ty_adt_def()
1440 // skip extension traits, only lint functions from the standard library
1441 && cx.tcx.trait_id_of_impl(impl_did).is_none()
// The receiver type must be one of the known atomics from `core::sync::atomic`.
1442 && let parent = cx.tcx.parent(adt.did())
1443 && cx.tcx.is_diagnostic_item(sym::atomic_mod, parent)
1444 && ATOMIC_TYPES.contains(&cx.tcx.item_name(adt.did()))
1446 return Some((method_path.ident.name, args));
// If `ord_arg` is a path to a variant (or variant ctor) of
// `core::sync::atomic::Ordering`, return that variant's symbol.
1451 fn match_ordering(cx: &LateContext<'_>, ord_arg: &Expr<'_>) -> Option<Symbol> {
1452 let ExprKind::Path(ref ord_qpath) = ord_arg.kind else { return None };
1453 let did = cx.qpath_res(ord_qpath, ord_arg.hir_id).opt_def_id()?;
1455 let atomic_ordering = tcx.get_diagnostic_item(sym::Ordering);
1456 let name = tcx.item_name(did);
1457 let parent = tcx.parent(did);
1458 [sym::Relaxed, sym::Release, sym::Acquire, sym::AcqRel, sym::SeqCst].into_iter().find(
1461 && (Some(parent) == atomic_ordering
1462 // needed in case this is a ctor, not a variant
1463 || tcx.opt_parent(parent) == atomic_ordering)
// Lint `load(Release | AcqRel)` and `store(Acquire | AcqRel)` — orderings
// these operations do not support.
1468 fn check_atomic_load_store(cx: &LateContext<'_>, expr: &Expr<'_>) {
1469 if let Some((method, args)) = Self::inherent_atomic_method_call(cx, expr, &[sym::load, sym::store])
// For `load` the ordering is the first arg; for `store` it follows the value.
1470 && let Some((ordering_arg, invalid_ordering)) = match method {
1471 sym::load => Some((&args[0], sym::Release)),
1472 sym::store => Some((&args[1], sym::Acquire)),
1475 && let Some(ordering) = Self::match_ordering(cx, ordering_arg)
// `AcqRel` is invalid for both operations.
1476 && (ordering == invalid_ordering || ordering == sym::AcqRel)
1478 if method == sym::load {
1479 cx.emit_spanned_lint(INVALID_ATOMIC_ORDERING, ordering_arg.span, AtomicOrderingLoad);
1481 cx.emit_spanned_lint(INVALID_ATOMIC_ORDERING, ordering_arg.span, AtomicOrderingStore);
// Lint `fence(Relaxed)` / `compiler_fence(Relaxed)` — a relaxed fence is a no-op.
1486 fn check_memory_fence(cx: &LateContext<'_>, expr: &Expr<'_>) {
1487 if let ExprKind::Call(ref func, ref args) = expr.kind
1488 && let ExprKind::Path(ref func_qpath) = func.kind
1489 && let Some(def_id) = cx.qpath_res(func_qpath, func.hir_id).opt_def_id()
// Match the free functions by their diagnostic items.
1490 && matches!(cx.tcx.get_diagnostic_name(def_id), Some(sym::fence | sym::compiler_fence))
1491 && Self::match_ordering(cx, &args[0]) == Some(sym::Relaxed)
1493 cx.emit_spanned_lint(INVALID_ATOMIC_ORDERING, args[0].span, AtomicOrderingFence);
// Lint a `Release`/`AcqRel` *failure* ordering passed to compare-exchange-style
// methods; failure orderings cannot perform a release.
1497 fn check_atomic_compare_exchange(cx: &LateContext<'_>, expr: &Expr<'_>) {
1498 let Some((method, args)) = Self::inherent_atomic_method_call(cx, expr, &[sym::fetch_update, sym::compare_exchange, sym::compare_exchange_weak])
// Position of the failure-ordering argument differs per method:
// `fetch_update(set_order, fetch_order, f)` vs
// `compare_exchange{,_weak}(current, new, success, failure)`.
1501 let fail_order_arg = match method {
1502 sym::fetch_update => &args[1],
1503 sym::compare_exchange | sym::compare_exchange_weak => &args[3],
1507 let Some(fail_ordering) = Self::match_ordering(cx, fail_order_arg) else { return };
1509 if matches!(fail_ordering, sym::Release | sym::AcqRel) {
1510 cx.emit_spanned_lint(
1511 INVALID_ATOMIC_ORDERING,
1512 fail_order_arg.span,
1513 InvalidAtomicOrderingDiag { method, fail_order_arg_span: fail_order_arg.span },
// Entry point: run all three atomic-ordering checks on every expression.
// Each check matches a disjoint expression shape, so order is immaterial.
1519 impl<'tcx> LateLintPass<'tcx> for InvalidAtomicOrdering {
1520 fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
1521 Self::check_atomic_load_store(cx, expr);
1522 Self::check_memory_fence(cx, expr);
1523 Self::check_atomic_compare_exchange(cx, expr);