2 AtomicOrderingFence, AtomicOrderingLoad, AtomicOrderingStore, ImproperCTypes,
3 InvalidAtomicOrderingDiag, OnlyCastu8ToChar, OverflowingBinHex, OverflowingBinHexSign,
4 OverflowingBinHexSub, OverflowingInt, OverflowingIntHelp, OverflowingLiteral, OverflowingUInt,
5 RangeEndpointOutOfRange, UnusedComparisons, VariantSizeDifferencesDiag,
7 use crate::{LateContext, LateLintPass, LintContext};
9 use rustc_attr as attr;
10 use rustc_data_structures::fx::FxHashSet;
11 use rustc_errors::{fluent, DiagnosticMessage};
13 use rustc_hir::{is_range_literal, Expr, ExprKind, Node};
14 use rustc_middle::ty::layout::{IntegerExt, LayoutOf, SizeSkeleton};
15 use rustc_middle::ty::subst::SubstsRef;
16 use rustc_middle::ty::{self, AdtKind, DefIdTree, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable};
17 use rustc_span::source_map;
18 use rustc_span::symbol::sym;
19 use rustc_span::{Span, Symbol};
20 use rustc_target::abi::{Abi, Size, WrappingRange};
21 use rustc_target::abi::{Integer, TagEncoding, Variants};
22 use rustc_target::spec::abi::Abi as SpecAbi;
25 use std::ops::ControlFlow;
28 /// The `unused_comparisons` lint detects comparisons made useless by
29 /// limits of the types involved.
43 /// A useless comparison may indicate a mistake, and should be fixed or
47 "comparisons made useless by limits of the types involved"
51 /// The `overflowing_literals` lint detects literals out of range for their
56 /// ```rust,compile_fail
64 /// It is usually a mistake to use a literal that overflows the type where
65 /// it is used. Either use a literal that is within range, or change the
66 /// type to be within the range of the literal.
69 "literal out of range for its type"
73 /// The `variant_size_differences` lint detects enums with widely varying
78 /// ```rust,compile_fail
79 /// #![deny(variant_size_differences)]
90 /// It can be a mistake to add a variant to an enum that is much larger
91 /// than the other variants, bloating the overall size required for all
92 /// variants. This can impact performance and memory usage. This is
93 /// triggered if one variant is more than 3 times larger than the
94 /// second-largest variant.
96 /// Consider placing the large variant's contents on the heap (for example
97 /// via [`Box`]) to keep the overall size of the enum itself down.
99 /// This lint is "allow" by default because it can be noisy, and may not be
100 /// an actual problem. Decisions about this should be guided with
101 /// profiling and benchmarking.
103 /// [`Box`]: https://doc.rust-lang.org/std/boxed/index.html
104 VARIANT_SIZE_DIFFERENCES,
106 "detects enums with widely varying variant sizes"
// State for the `TypeLimits` lint pass, which emits the
// `unused_comparisons` and `overflowing_literals` lints.
// NOTE(review): this excerpt is missing lines (e.g. the struct's closing
// brace and the `impl` header around `new`) — confirm against the full file.
109 #[derive(Copy, Clone)]
110 pub struct TypeLimits {
111 /// Id of the last visited negated expression
112 negated_expr_id: Option<hir::HirId>,
// Registers the lints this pass may emit.
115 impl_lint_pass!(TypeLimits => [UNUSED_COMPARISONS, OVERFLOWING_LITERALS]);
// Fresh pass state: no negated expression has been visited yet.
118 pub fn new() -> TypeLimits {
119 TypeLimits { negated_expr_id: None }
123 /// Attempts to special-case the overflowing literal lint when it occurs as a range endpoint (`expr..MAX+1`).
124 /// Returns `true` iff the lint was emitted.
// NOTE(review): several body lines are missing from this excerpt
// (parameters `lit`/`lit_val`/`max`/`ty`, some closing braces) — the
// comments below describe only what the visible lines establish.
125 fn lint_overflowing_range_endpoint<'tcx>(
126 cx: &LateContext<'tcx>,
130 expr: &'tcx hir::Expr<'tcx>,
133 // We only want to handle exclusive (`..`) ranges,
134 // which are represented as `ExprKind::Struct`.
// Walk two HIR levels up: literal -> field of the range struct -> range expr.
135 let par_id = cx.tcx.hir().parent_id(expr.hir_id);
136 let Node::ExprField(field) = cx.tcx.hir().get(par_id) else { return false };
137 let Node::Expr(struct_expr) = cx.tcx.hir().get_parent(field.hir_id) else { return false };
138 if !is_range_literal(struct_expr) {
141 let ExprKind::Struct(_, eps, _) = &struct_expr.kind else { return false };
146 // We can suggest using an inclusive range
147 // (`..=`) instead only if it is the `end` that is
148 // overflowing and only by 1.
149 if !(eps[1].expr.hir_id == expr.hir_id && lit_val - 1 == max) {
// Need the source snippet of the range start to build the suggestion.
152 let Ok(start) = cx.sess().source_map().span_to_snippet(eps[0].span) else { return false };
// Preserve any explicit integer suffix (`u8`, `i32`, ...) in the suggestion.
154 use rustc_ast::{LitIntType, LitKind};
155 let suffix = match lit.node {
156 LitKind::Int(_, LitIntType::Signed(s)) => s.name_str(),
157 LitKind::Int(_, LitIntType::Unsigned(s)) => s.name_str(),
158 LitKind::Int(_, LitIntType::Unsuffixed) => "",
161 cx.emit_spanned_lint(
162 OVERFLOWING_LITERALS,
164 RangeEndpointOutOfRange {
166 suggestion: struct_expr.span,
168 literal: lit_val - 1,
173 // We've just emitted a lint, special cased for `(...)..MAX+1` ranges,
174 // return `true` so the callers don't also emit a lint
178 // For `isize` & `usize`, be conservative with the warnings, so that the
179 // warnings are consistent between 32- and 64-bit platforms.
// Returns the (min, max) value range of a signed integer type, widened to i128.
// `Isize` is mapped to the 64-bit range per the note above.
// NOTE(review): the `match` opener and closing braces are missing from this excerpt.
180 fn int_ty_range(int_ty: ty::IntTy) -> (i128, i128) {
182 ty::IntTy::Isize => (i64::MIN.into(), i64::MAX.into()),
183 ty::IntTy::I8 => (i8::MIN.into(), i8::MAX.into()),
184 ty::IntTy::I16 => (i16::MIN.into(), i16::MAX.into()),
185 ty::IntTy::I32 => (i32::MIN.into(), i32::MAX.into()),
186 ty::IntTy::I64 => (i64::MIN.into(), i64::MAX.into()),
187 ty::IntTy::I128 => (i128::MIN, i128::MAX),
// Returns the (min, max) value range of an unsigned integer type, widened
// to u128. Like `int_ty_range`, `Usize` uses the 64-bit max so warnings are
// platform-independent.
// NOTE(review): the closing `(0, max)`-style return is missing from this excerpt.
191 fn uint_ty_range(uint_ty: ty::UintTy) -> (u128, u128) {
192 let max = match uint_ty {
193 ty::UintTy::Usize => u64::MAX.into(),
194 ty::UintTy::U8 => u8::MAX.into(),
195 ty::UintTy::U16 => u16::MAX.into(),
196 ty::UintTy::U32 => u32::MAX.into(),
197 ty::UintTy::U64 => u64::MAX.into(),
198 ty::UintTy::U128 => u128::MAX,
// If the literal was written in binary (`0b...`) or hex (`0x...`) notation,
// returns its source snippet; otherwise (or if the snippet is unavailable) None.
203 fn get_bin_hex_repr(cx: &LateContext<'_>, lit: &hir::Lit) -> Option<String> {
204 let src = cx.sess().source_map().span_to_snippet(lit.span).ok()?;
205 let firstch = src.chars().next()?;
// Checks the second character for the `x`/`b` radix prefix.
// NOTE(review): the guard on `firstch` (presumably `== '0'`) is missing
// from this excerpt — confirm against the full file.
208 match src.chars().nth(1) {
209 Some('x' | 'b') => return Some(src),
// Emits the `overflowing_literals` lint for an out-of-range binary/hex
// literal, showing the value the bits would actually represent in the target
// type and, when possible, suggesting a wider type.
// NOTE(review): parameters `ty`/`size`/`val`/`negative`/`repr_str` are not
// visible in this excerpt — inferred from usage below.
217 fn report_bin_hex_error(
218 cx: &LateContext<'_>,
219 expr: &hir::Expr<'_>,
// Compute the value the literal's bits denote when reinterpreted in the
// declared type: sign-extend for signed types, truncate for unsigned.
226 let (t, actually) = match ty {
227 attr::IntType::SignedInt(t) => {
228 let actually = if negative {
229 -(size.sign_extend(val) as i128)
231 size.sign_extend(val) as i128
233 (t.name_str(), actually.to_string())
235 attr::IntType::UnsignedInt(t) => {
236 let actually = size.truncate(val);
237 (t.name_str(), actually.to_string())
241 if negative { OverflowingBinHexSign::Negative } else { OverflowingBinHexSign::Positive };
// If a larger fitting type exists, suggest it — stripping any existing
// `i`/`u` suffix from the literal so the suggestion replaces it cleanly.
242 let sub = get_type_suggestion(cx.typeck_results().node_type(expr.hir_id), val, negative).map(
244 if let Some(pos) = repr_str.chars().position(|c| c == 'i' || c == 'u') {
245 let (sans_suffix, _) = repr_str.split_at(pos);
246 OverflowingBinHexSub::Suggestion { span: expr.span, suggestion_ty, sans_suffix }
248 OverflowingBinHexSub::Help { suggestion_ty }
252 cx.emit_spanned_lint(
253 OVERFLOWING_LITERALS,
255 OverflowingBinHex { ty: t, lit: repr_str.clone(), dec: val, actually, sign, sub },
259 // This function finds the next fitting type and generates a suggestion string.
260 // It searches for fitting types in the following way (`X < Y`):
261 // - `iX`: if literal fits in `uX` => `uX`, else => `iY`
265 // No suggestion for: `isize`, `usize`.
266 fn get_type_suggestion(t: Ty<'_>, val: u128, negative: bool) -> Option<&'static str> {
// Helper macro: for the given concrete int type, try the listed unsigned
// candidates first (positive values only), then the listed wider signed ones.
269 macro_rules! find_fit {
270 ($ty:expr, $val:expr, $negative:expr,
271 $($type:ident => [$($utypes:expr),*] => [$($itypes:expr),*]),+) => {
// `_neg` widens the signed bound by 1 for negative literals, since
// e.g. i16 reaches -32768 but only +32767.
273 let _neg = if negative { 1 } else { 0 };
276 $(if !negative && val <= uint_ty_range($utypes).1 {
277 return Some($utypes.name_str())
279 $(if val <= int_ty_range($itypes).1 as u128 + _neg {
280 return Some($itypes.name_str())
// Candidate tables per source type; `isize`/`usize` intentionally absent.
290 ty::Int(i) => find_fit!(i, val, negative,
291 I8 => [U8] => [I16, I32, I64, I128],
292 I16 => [U16] => [I32, I64, I128],
293 I32 => [U32] => [I64, I128],
294 I64 => [U64] => [I128],
295 I128 => [U128] => []),
296 ty::Uint(u) => find_fit!(u, val, negative,
297 U8 => [U8, U16, U32, U64, U128] => [],
298 U16 => [U16, U32, U64, U128] => [],
299 U32 => [U32, U64, U128] => [],
300 U64 => [U64, U128] => [],
301 U128 => [U128] => []),
// Checks a signed-integer literal against its type's range and emits
// `overflowing_literals` if it does not fit, preferring the bin/hex and
// range-endpoint special cases before the generic diagnostic.
// NOTE(review): parameters `lit`/`t`/`v` and several closing braces are
// missing from this excerpt.
306 fn lint_int_literal<'tcx>(
307 cx: &LateContext<'tcx>,
308 type_limits: &TypeLimits,
309 e: &'tcx hir::Expr<'tcx>,
// Resolve `isize` to the target's concrete pointer-width type.
314 let int_type = t.normalize(cx.sess().target.pointer_width);
315 let (min, max) = int_ty_range(int_type);
316 let max = max as u128;
// A literal under a leading unary minus was recorded by `check_expr`.
317 let negative = type_limits.negated_expr_id == Some(e.hir_id);
319 // Detect literal value out of range [min, max] inclusive
320 // avoiding use of -min to prevent overflow/panic
321 if (negative && v > max + 1) || (!negative && v > max) {
// Bin/hex literals get a dedicated diagnostic showing the bit pattern.
322 if let Some(repr_str) = get_bin_hex_repr(cx, lit) {
323 report_bin_hex_error(
326 attr::IntType::SignedInt(ty::ast_int_ty(t)),
327 Integer::from_int_ty(cx, t).size(),
335 if lint_overflowing_range_endpoint(cx, lit, v, max, e, t.name_str()) {
336 // The overflowing literal lint was emitted by `lint_overflowing_range_endpoint`.
343 .span_to_snippet(lit.span)
344 .expect("must get snippet from literal");
345 let help = get_type_suggestion(cx.typeck_results().node_type(e.hir_id), v, negative)
346 .map(|suggestion_ty| OverflowingIntHelp { suggestion_ty });
348 cx.emit_spanned_lint(
349 OVERFLOWING_LITERALS,
351 OverflowingInt { ty: t.name_str(), lit, min, max, help },
// Checks an unsigned-integer literal against its type's range, with special
// handling for `u8 as char` casts, range endpoints, and bin/hex literals.
// NOTE(review): parameters `lit`/`t` and several closing braces/match arms
// are missing from this excerpt.
356 fn lint_uint_literal<'tcx>(
357 cx: &LateContext<'tcx>,
358 e: &'tcx hir::Expr<'tcx>,
// Resolve `usize` to the target's concrete pointer-width type.
362 let uint_type = t.normalize(cx.sess().target.pointer_width);
363 let (min, max) = uint_ty_range(uint_type);
364 let lit_val: u128 = match lit.node {
365 // _v is u8, within range by definition
366 ast::LitKind::Byte(_v) => return,
367 ast::LitKind::Int(v, _) => v,
370 if lit_val < min || lit_val > max {
// An overflowing literal cast to `char` gets a dedicated message, since
// only `u8` may be cast to `char`.
371 let parent_id = cx.tcx.hir().parent_id(e.hir_id);
372 if let Node::Expr(par_e) = cx.tcx.hir().get(parent_id) {
374 hir::ExprKind::Cast(..) => {
375 if let ty::Char = cx.typeck_results().expr_ty(par_e).kind() {
376 cx.emit_spanned_lint(
377 OVERFLOWING_LITERALS,
379 OnlyCastu8ToChar { span: par_e.span, literal: lit_val },
387 if lint_overflowing_range_endpoint(cx, lit, lit_val, max, e, t.name_str()) {
388 // The overflowing literal lint was emitted by `lint_overflowing_range_endpoint`.
391 if let Some(repr_str) = get_bin_hex_repr(cx, lit) {
392 report_bin_hex_error(
395 attr::IntType::UnsignedInt(ty::ast_uint_ty(t)),
396 Integer::from_uint_ty(cx, t).size(),
// Fallback: generic overflowing-literal diagnostic with the source snippet.
403 cx.emit_spanned_lint(
404 OVERFLOWING_LITERALS,
411 .span_to_snippet(lit.span)
412 .expect("must get snippet from literal"),
// Entry point for literal range checking: dispatches on the literal's
// inferred type to the int/uint helpers, and flags float literals whose
// written value overflows to infinity.
// NOTE(review): parameter `lit`, some match arms (e.g. `ty::Int`), and
// closing braces are missing from this excerpt.
420 fn lint_literal<'tcx>(
421 cx: &LateContext<'tcx>,
422 type_limits: &TypeLimits,
423 e: &'tcx hir::Expr<'tcx>,
426 match *cx.typeck_results().node_type(e.hir_id).kind() {
// Only signed/unsuffixed int literals go through the signed path.
429 ast::LitKind::Int(v, ast::LitIntType::Signed(_) | ast::LitIntType::Unsuffixed) => {
430 lint_int_literal(cx, type_limits, e, lit, t, v)
435 ty::Uint(t) => lint_uint_literal(cx, e, lit, t),
// Float literals: re-parse the source token and check whether the value
// rounds to infinity in the target float width.
437 let is_infinite = match lit.node {
438 ast::LitKind::Float(v, _) => match t {
439 ty::FloatTy::F32 => v.as_str().parse().map(f32::is_infinite),
440 ty::FloatTy::F64 => v.as_str().parse().map(f64::is_infinite),
444 if is_infinite == Ok(true) {
445 cx.emit_spanned_lint(
446 OVERFLOWING_LITERALS,
453 .span_to_snippet(lit.span)
454 .expect("must get snippet from literal"),
463 impl<'tcx> LateLintPass<'tcx> for TypeLimits {
// Visits each expression: tracks negation so `-128i8` is seen as one
// negative literal, flags always-true/false comparisons, and range-checks
// literals.
464 fn check_expr(&mut self, cx: &LateContext<'tcx>, e: &'tcx hir::Expr<'tcx>) {
466 hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => {
467 // propagate negation, if the negation itself isn't negated
468 if self.negated_expr_id != Some(e.hir_id) {
469 self.negated_expr_id = Some(expr.hir_id);
472 hir::ExprKind::Binary(binop, ref l, ref r) => {
// A comparison that `check_limits` proves vacuous is useless.
473 if is_comparison(binop) && !check_limits(cx, binop, &l, &r) {
474 cx.emit_spanned_lint(UNUSED_COMPARISONS, e.span, UnusedComparisons);
477 hir::ExprKind::Lit(ref lit) => lint_literal(cx, self, e, lit),
// Given a comparison `expr <op> v` where `v` is a literal and the other
// operand's type spans [min, max], returns whether the comparison can be
// both true and false (i.e. is not vacuous).
// NOTE(review): the `match` opener and default arm are missing from this excerpt.
481 fn is_valid<T: PartialOrd>(binop: hir::BinOp, v: T, min: T, max: T) -> bool {
483 hir::BinOpKind::Lt => v > min && v <= max,
484 hir::BinOpKind::Le => v >= min && v < max,
485 hir::BinOpKind::Gt => v >= min && v < max,
486 hir::BinOpKind::Ge => v > min && v <= max,
487 hir::BinOpKind::Eq | hir::BinOpKind::Ne => v >= min && v <= max,
// Mirrors a comparison operator so `lit < expr` can be analyzed as
// `expr > lit` (see the normalization in `check_limits`).
// NOTE(review): the body's `match` opener/closer and the pass-through arm
// are missing from this excerpt.
492 fn rev_binop(binop: hir::BinOp) -> hir::BinOp {
496 hir::BinOpKind::Lt => hir::BinOpKind::Gt,
497 hir::BinOpKind::Le => hir::BinOpKind::Ge,
498 hir::BinOpKind::Gt => hir::BinOpKind::Lt,
499 hir::BinOpKind::Ge => hir::BinOpKind::Le,
// (Continuation of `check_limits` — its `fn` signature line is not visible
// in this excerpt.) Determines whether a comparison between a literal and an
// integer-typed expression can ever vary; returns `false` when it is vacuous,
// causing `check_expr` to emit `unused_comparisons`.
506 cx: &LateContext<'_>,
// Identify which side is the literal and remember whether we must mirror
// the operator to put the literal on the RHS.
511 let (lit, expr, swap) = match (&l.kind, &r.kind) {
512 (&hir::ExprKind::Lit(_), _) => (l, r, true),
513 (_, &hir::ExprKind::Lit(_)) => (r, l, false),
516 // Normalize the binop so that the literal is always on the RHS in
518 let norm_binop = if swap { rev_binop(binop) } else { binop };
519 match *cx.typeck_results().node_type(expr.hir_id).kind() {
521 let (min, max) = int_ty_range(int_ty);
522 let lit_val: i128 = match lit.kind {
523 hir::ExprKind::Lit(ref li) => match li.node {
526 ast::LitIntType::Signed(_) | ast::LitIntType::Unsuffixed,
532 is_valid(norm_binop, lit_val, min, max)
534 ty::Uint(uint_ty) => {
535 let (min, max): (u128, u128) = uint_ty_range(uint_ty);
536 let lit_val: u128 = match lit.kind {
537 hir::ExprKind::Lit(ref li) => match li.node {
538 ast::LitKind::Int(v, _) => v,
543 is_valid(norm_binop, lit_val, min, max)
// Predicate: is this binary operator a comparison (`==`, `!=`, `<`, `<=`,
// `>`, `>=`)? NOTE(review): only the signature is visible in this excerpt.
549 fn is_comparison(binop: hir::BinOp) -> bool {
564 /// The `improper_ctypes` lint detects incorrect use of types in foreign
571 /// static STATIC: String;
579 /// The compiler has several checks to verify that types used in `extern`
580 /// blocks are safe and follow certain rules to ensure proper
581 /// compatibility with the foreign interfaces. This lint is issued when it
582 /// detects a probable mistake in a definition. The lint usually should
583 /// provide a description of the issue, along with possibly a hint on how
587 "proper use of libc types in foreign modules"
590 declare_lint_pass!(ImproperCTypesDeclarations => [IMPROPER_CTYPES]);
593 /// The `improper_ctypes_definitions` lint detects incorrect use of
594 /// [`extern` function] definitions.
596 /// [`extern` function]: https://doc.rust-lang.org/reference/items/functions.html#extern-function-qualifier
601 /// # #![allow(unused)]
602 /// pub extern "C" fn str_type(p: &str) { }
609 /// There are many parameter and return types that may be specified in an
610 /// `extern` function that are not compatible with the given ABI. This
611 /// lint is an alert that these types should not be used. The lint usually
612 /// should provide a description of the issue, along with possibly a hint
613 /// on how to resolve it.
614 IMPROPER_CTYPES_DEFINITIONS,
616 "proper use of libc types in foreign item definitions"
619 declare_lint_pass!(ImproperCTypesDefinitions => [IMPROPER_CTYPES_DEFINITIONS]);
// Whether an item being FFI-checked is an extern-block *declaration*
// (IMPROPER_CTYPES) or an extern-fn *definition* (IMPROPER_CTYPES_DEFINITIONS).
// NOTE(review): the enum's variants and the visitor's `mode` field are
// missing from this excerpt.
621 #[derive(Clone, Copy)]
622 pub(crate) enum CItemKind {
// Visitor that classifies types as FFI-safe or not.
627 struct ImproperCTypesVisitor<'a, 'tcx> {
628 cx: &'a LateContext<'tcx>,
// Outcome of an FFI-safety check: safe, PhantomData-only, or unsafe with a
// diagnostic reason and optional help message.
632 enum FfiResult<'tcx> {
634 FfiPhantom(Ty<'tcx>),
635 FfiUnsafe { ty: Ty<'tcx>, reason: DiagnosticMessage, help: Option<DiagnosticMessage> },
// True when the ADT carries `#[rustc_nonnull_optimization_guaranteed]`
// (e.g. `NonZero*`, `NonNull`), so `Option<T>` of it is pointer-sized.
638 pub(crate) fn nonnull_optimization_guaranteed<'tcx>(
640 def: ty::AdtDef<'tcx>,
642 tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed)
645 /// `repr(transparent)` structs can have a single non-ZST field, this function returns that
647 pub fn transparent_newtype_field<'a, 'tcx>(
649 variant: &'a ty::VariantDef,
650 ) -> Option<&'a ty::FieldDef> {
651 let param_env = tcx.param_env(variant.def_id);
// Find the first field whose layout is not a ZST (fields whose layout
// cannot be computed are treated as non-ZST here).
// NOTE(review): the closure's final `!is_zst` expression and closing braces
// are missing from this excerpt.
652 variant.fields.iter().find(|field| {
653 let field_ty = tcx.type_of(field.did);
654 let is_zst = tcx.layout_of(param_env.and(field_ty)).map_or(false, |layout| layout.is_zst());
659 /// Is type known to be non-null?
// Fn pointers and (in definitions) Box are non-null; a repr(transparent)
// non-union ADT is non-null if marked with the nonnull-optimization attribute
// or if its single non-ZST field is recursively non-null.
// NOTE(review): arms for `ty::Ref` and the fallback, plus some lines using
// `marked_non_null`, are missing from this excerpt.
660 fn ty_is_known_nonnull<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, mode: CItemKind) -> bool {
663 ty::FnPtr(_) => true,
665 ty::Adt(def, _) if def.is_box() && matches!(mode, CItemKind::Definition) => true,
666 ty::Adt(def, substs) if def.repr().transparent() && !def.is_union() => {
667 let marked_non_null = nonnull_optimization_guaranteed(tcx, *def);
673 // `UnsafeCell` has its niche hidden.
674 if def.is_unsafe_cell() {
// Recurse into each variant's non-ZST field.
680 .filter_map(|variant| transparent_newtype_field(cx.tcx, variant))
681 .any(|field| ty_is_known_nonnull(cx, field.ty(tcx, substs), mode))
687 /// Given a non-null scalar (or transparent) type `ty`, return the nullable version of that type.
688 /// If the type passed in was not scalar, returns None.
689 fn get_nullable_type<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
691 Some(match *ty.kind() {
// Transparent ADT: unwrap to its single non-ZST field and recurse; the
// debug assertion guards the "exactly one such field" invariant.
692 ty::Adt(field_def, field_substs) => {
693 let inner_field_ty = {
694 let mut first_non_zst_ty = field_def
697 .filter_map(|v| transparent_newtype_field(cx.tcx, v));
699 first_non_zst_ty.clone().count(),
701 "Wrong number of fields for transparent type"
705 .expect("No non-zst fields in transparent type.")
706 .ty(tcx, field_substs)
708 return get_nullable_type(cx, inner_field_ty);
// Integers are their own nullable form (0 is the null value).
710 ty::Int(ty) => tcx.mk_mach_int(ty),
711 ty::Uint(ty) => tcx.mk_mach_uint(ty),
712 ty::RawPtr(ty_mut) => tcx.mk_ptr(ty_mut),
713 // As these types are always non-null, the nullable equivalent of
714 // Option<T> of these types are their raw pointer counterparts.
715 ty::Ref(_region, ty, mutbl) => tcx.mk_ptr(ty::TypeAndMut { ty, mutbl }),
717 // There is no nullable equivalent for Rust's function pointers -- you
718 // must use an Option<fn(..) -> _> to represent it.
722 // We should only ever reach this case if ty_is_known_nonnull is extended
726 "get_nullable_type: Unhandled scalar kind: {:?} while checking {:?}",
734 /// Check if this enum can be safely exported based on the "nullable pointer optimization". If it
735 /// can, return the type that `ty` can be safely converted to, otherwise return `None`.
736 /// Currently restricted to function pointers, boxes, references, `core::num::NonZero*`,
737 /// `core::ptr::NonNull`, and `#[repr(transparent)]` newtypes.
738 /// FIXME: This duplicates code in codegen.
739 pub(crate) fn repr_nullable_ptr<'tcx>(
740 cx: &LateContext<'tcx>,
743 ) -> Option<Ty<'tcx>> {
744 debug!("is_repr_nullable_ptr(cx, ty = {:?})", ty);
// Must be an Option-like two-variant enum: one fieldless variant and one
// single-field variant.
745 if let ty::Adt(ty_def, substs) = ty.kind() {
746 let field_ty = match &ty_def.variants().raw[..] {
747 [var_one, var_two] => match (&var_one.fields[..], &var_two.fields[..]) {
748 ([], [field]) | ([field], []) => field.ty(cx.tcx, substs),
754 if !ty_is_known_nonnull(cx, field_ty, ckind) {
758 // At this point, the field's type is known to be nonnull and the parent enum is Option-like.
759 // If the computed size for the field and the enum are different, the nonnull optimization isn't
760 // being applied (and we've got a problem somewhere).
761 let compute_size_skeleton = |t| SizeSkeleton::compute(t, cx.tcx, cx.param_env).unwrap();
762 if !compute_size_skeleton(ty).same_size(compute_size_skeleton(field_ty)) {
763 bug!("improper_ctypes: Option nonnull optimization not applied?");
766 // Return the nullable type this Option-like enum can be safely represented with.
// Inspect the scalar's valid range: a niche at the top (max) or at 0 both
// admit a nullable representation; anything else is unexpected here.
767 let field_ty_abi = &cx.layout_of(field_ty).unwrap().abi;
768 if let Abi::Scalar(field_ty_scalar) = field_ty_abi {
769 match field_ty_scalar.valid_range(cx) {
770 WrappingRange { start: 0, end }
771 if end == field_ty_scalar.size(&cx.tcx).unsigned_int_max() - 1 =>
773 return Some(get_nullable_type(cx, field_ty).unwrap());
775 WrappingRange { start: 1, .. } => {
776 return Some(get_nullable_type(cx, field_ty).unwrap());
778 WrappingRange { start, end } => {
779 unreachable!("Unhandled start and end range: ({}, {})", start, end)
787 impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
788 /// Check if the type is array and emit an unsafe type lint.
// Returns whether a lint was emitted (arrays can't be passed by value in C).
789 fn check_for_array_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool {
790 if let ty::Array(..) = ty.kind() {
791 self.emit_ffi_unsafe_type_lint(
794 fluent::lint_improper_ctypes_array_reason,
795 Some(fluent::lint_improper_ctypes_array_help),
803 /// Checks if the given field's type is "ffi-safe".
804 fn check_field_type_for_ffi(
806 cache: &mut FxHashSet<Ty<'tcx>>,
807 field: &ty::FieldDef,
808 substs: SubstsRef<'tcx>,
809 ) -> FfiResult<'tcx> {
810 let field_ty = field.ty(self.cx.tcx, substs);
// Types containing opaque types are checked as-is (normalization would
// replace the opaque types); otherwise normalize away regions/projections
// before the recursive check.
811 if field_ty.has_opaque_types() {
812 self.check_type_for_ffi(cache, field_ty)
814 let field_ty = self.cx.tcx.normalize_erasing_regions(self.cx.param_env, field_ty);
815 self.check_type_for_ffi(cache, field_ty)
819 /// Checks if the given `VariantDef`'s field types are "ffi-safe".
820 fn check_variant_for_ffi(
822 cache: &mut FxHashSet<Ty<'tcx>>,
824 def: ty::AdtDef<'tcx>,
825 variant: &ty::VariantDef,
826 substs: SubstsRef<'tcx>,
827 ) -> FfiResult<'tcx> {
// For repr(transparent) types, the verdict of the single non-ZST field
// (or the all-ZST error) takes precedence where applicable.
830 let transparent_safety = def.repr().transparent().then(|| {
831 // Can assume that at most one field is not a ZST, so only check
832 // that field's type for FFI-safety.
833 if let Some(field) = transparent_newtype_field(self.cx.tcx, variant) {
834 return self.check_field_type_for_ffi(cache, field, substs);
836 // All fields are ZSTs; this means that the type should behave
837 // like (), which is FFI-unsafe... except if all fields are PhantomData,
838 // which is tested for below
839 FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_struct_zst, help: None }
842 // We can't completely trust repr(C) markings; make sure the fields are
// Scan every field; `all_phantom` stays true only while each field is
// PhantomData-like (and a fieldless variant is never "all phantom").
844 let mut all_phantom = !variant.fields.is_empty();
845 for field in &variant.fields {
846 match self.check_field_type_for_ffi(cache, &field, substs) {
// PhantomData in an enum variant is itself FFI-unsafe.
850 FfiPhantom(..) if !def.repr().transparent() && def.is_enum() => {
853 reason: fluent::lint_improper_ctypes_enum_phantomdata,
// Any other unsafe verdict is returned, unless overridden by the
// transparent-newtype verdict above.
858 r => return transparent_safety.unwrap_or(r),
862 if all_phantom { FfiPhantom(ty) } else { transparent_safety.unwrap_or(FfiSafe) }
865 /// Checks if the given type is "ffi-safe" (has a stable, well-defined
866 /// representation which can be exported to C code).
// Core recursive classifier. NOTE(review): this excerpt omits many lines
// (match opener, several arms, closing braces) — comments below cover only
// the visible logic.
867 fn check_type_for_ffi(&self, cache: &mut FxHashSet<Ty<'tcx>>, ty: Ty<'tcx>) -> FfiResult<'tcx> {
870 let tcx = self.cx.tcx;
872 // Protect against infinite recursion, for example
873 // `struct S(*mut S);`.
874 // FIXME: A recursion limit is necessary as well, for irregular
// A type already in the cache is being re-visited: treat as safe to break
// the cycle.
876 if !cache.insert(ty) {
881 ty::Adt(def, substs) => {
// `Box<T>` of sized T is allowed in definitions (it is a non-null pointer).
882 if def.is_box() && matches!(self.mode, CItemKind::Definition) {
883 if ty.boxed_ty().is_sized(tcx, self.cx.param_env) {
888 reason: fluent::lint_improper_ctypes_box,
893 if def.is_phantom_data() {
894 return FfiPhantom(ty);
896 match def.adt_kind() {
897 AdtKind::Struct | AdtKind::Union => {
// Structs/unions need repr(C) or repr(transparent).
898 if !def.repr().c() && !def.repr().transparent() {
901 reason: if def.is_struct() {
902 fluent::lint_improper_ctypes_struct_layout_reason
904 fluent::lint_improper_ctypes_union_layout_reason
906 help: if def.is_struct() {
907 Some(fluent::lint_improper_ctypes_struct_layout_help)
909 Some(fluent::lint_improper_ctypes_union_layout_help)
// Non-local #[non_exhaustive] types may grow fields; unsafe.
914 let is_non_exhaustive =
915 def.non_enum_variant().is_field_list_non_exhaustive();
916 if is_non_exhaustive && !def.did().is_local() {
919 reason: if def.is_struct() {
920 fluent::lint_improper_ctypes_struct_non_exhaustive
922 fluent::lint_improper_ctypes_union_non_exhaustive
// Fieldless structs/unions behave like (), which is not FFI-safe.
928 if def.non_enum_variant().fields.is_empty() {
931 reason: if def.is_struct() {
932 fluent::lint_improper_ctypes_struct_fieldless_reason
934 fluent::lint_improper_ctypes_union_fieldless_reason
936 help: if def.is_struct() {
937 Some(fluent::lint_improper_ctypes_struct_fieldless_help)
939 Some(fluent::lint_improper_ctypes_union_fieldless_help)
944 self.check_variant_for_ffi(cache, ty, def, def.non_enum_variant(), substs)
947 if def.variants().is_empty() {
948 // Empty enums are okay... although sort of useless.
952 // Check for a repr() attribute to specify the size of the
// An enum with no explicit repr is only OK if it is an Option-like
// enum eligible for the nullable-pointer optimization.
954 if !def.repr().c() && !def.repr().transparent() && def.repr().int.is_none()
956 // Special-case types like `Option<extern fn()>`.
957 if repr_nullable_ptr(self.cx, ty, self.mode).is_none() {
960 reason: fluent::lint_improper_ctypes_enum_repr_reason,
961 help: Some(fluent::lint_improper_ctypes_enum_repr_help),
966 if def.is_variant_list_non_exhaustive() && !def.did().is_local() {
969 reason: fluent::lint_improper_ctypes_non_exhaustive,
974 // Check the contained variants.
975 for variant in def.variants() {
976 let is_non_exhaustive = variant.is_field_list_non_exhaustive();
977 if is_non_exhaustive && !variant.def_id.is_local() {
980 reason: fluent::lint_improper_ctypes_non_exhaustive_variant,
985 match self.check_variant_for_ffi(cache, ty, def, variant, substs) {
// `char` has no stable C representation.
996 ty::Char => FfiUnsafe {
998 reason: fluent::lint_improper_ctypes_char_reason,
999 help: Some(fluent::lint_improper_ctypes_char_help),
1002 ty::Int(ty::IntTy::I128) | ty::Uint(ty::UintTy::U128) => {
1003 FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_128bit, help: None }
1006 // Primitive types with a stable representation.
1007 ty::Bool | ty::Int(..) | ty::Uint(..) | ty::Float(..) | ty::Never => FfiSafe,
1009 ty::Slice(_) => FfiUnsafe {
1011 reason: fluent::lint_improper_ctypes_slice_reason,
1012 help: Some(fluent::lint_improper_ctypes_slice_help),
1015 ty::Dynamic(..) => {
1016 FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_dyn, help: None }
1019 ty::Str => FfiUnsafe {
1021 reason: fluent::lint_improper_ctypes_str_reason,
1022 help: Some(fluent::lint_improper_ctypes_str_help),
1025 ty::Tuple(..) => FfiUnsafe {
1027 reason: fluent::lint_improper_ctypes_tuple_reason,
1028 help: Some(fluent::lint_improper_ctypes_tuple_help),
// Pointers/references: special-cased in definitions for sized pointees.
1031 ty::RawPtr(ty::TypeAndMut { ty, .. }) | ty::Ref(_, ty, _)
1033 matches!(self.mode, CItemKind::Definition)
1034 && ty.is_sized(self.cx.tcx, self.cx.param_env)
1040 ty::RawPtr(ty::TypeAndMut { ty, .. })
1041 if match ty.kind() {
1042 ty::Tuple(tuple) => tuple.is_empty(),
1049 ty::RawPtr(ty::TypeAndMut { ty, .. }) | ty::Ref(_, ty, _) => {
1050 self.check_type_for_ffi(cache, ty)
1053 ty::Array(inner_ty, _) => self.check_type_for_ffi(cache, inner_ty),
// Function pointers: must use a non-Rust ABI, and all signature types
// must themselves be FFI-safe.
1056 if self.is_internal_abi(sig.abi()) {
1059 reason: fluent::lint_improper_ctypes_fnptr_reason,
1060 help: Some(fluent::lint_improper_ctypes_fnptr_help),
1064 let sig = tcx.erase_late_bound_regions(sig);
1065 if !sig.output().is_unit() {
1066 let r = self.check_type_for_ffi(cache, sig.output());
1074 for arg in sig.inputs() {
1075 let r = self.check_type_for_ffi(cache, *arg);
1086 ty::Foreign(..) => FfiSafe,
1088 // While opaque types are checked for earlier, if a projection in a struct field
1089 // normalizes to an opaque type, then it will reach this branch.
1090 ty::Alias(ty::Opaque, ..) => {
1091 FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_opaque, help: None }
1094 // `extern "C" fn` functions can have type parameters, which may or may not be FFI-safe,
1095 // so they are currently ignored for the purposes of this lint.
1096 ty::Param(..) | ty::Alias(ty::Projection, ..)
1097 if matches!(self.mode, CItemKind::Definition) =>
1103 | ty::Alias(ty::Projection, ..)
// These type kinds should never survive to a foreign signature.
1109 | ty::GeneratorWitness(..)
1110 | ty::Placeholder(..)
1111 | ty::FnDef(..) => bug!("unexpected type in foreign function: {:?}", ty),
// Emits the appropriate improper-ctypes lint (declaration vs. definition
// flavor), attaching a span note pointing at the offending type's definition
// when it is local.
1115 fn emit_ffi_unsafe_type_lint(
1119 note: DiagnosticMessage,
1120 help: Option<DiagnosticMessage>,
// Pick the lint and the "block"/"fn" wording from the visitor's mode.
1122 let lint = match self.mode {
1123 CItemKind::Declaration => IMPROPER_CTYPES,
1124 CItemKind::Definition => IMPROPER_CTYPES_DEFINITIONS,
1126 let desc = match self.mode {
1127 CItemKind::Declaration => "block",
1128 CItemKind::Definition => "fn",
1130 let span_note = if let ty::Adt(def, _) = ty.kind()
1131 && let Some(sp) = self.cx.tcx.hir().span_if_local(def.did()) {
1136 self.cx.emit_spanned_lint(
1139 ImproperCTypes { ty, desc, label: sp, help, note, span_note },
// Walks the (region-normalized) type looking for an opaque type; if one is
// found, emits the improper-ctypes lint for it and returns that a lint was
// emitted.
1143 fn check_for_opaque_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool {
1144 struct ProhibitOpaqueTypes;
1145 impl<'tcx> ty::visit::TypeVisitor<'tcx> for ProhibitOpaqueTypes {
1146 type BreakTy = Ty<'tcx>;
1148 fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
// Fast path: skip subtrees that contain no opaque types at all.
1149 if !ty.has_opaque_types() {
1150 return ControlFlow::Continue(());
// Break with the first opaque type found; otherwise recurse.
1153 if let ty::Alias(ty::Opaque, ..) = ty.kind() {
1154 ControlFlow::Break(ty)
1156 ty.super_visit_with(self)
1161 if let Some(ty) = self
1164 .normalize_erasing_regions(self.cx.param_env, ty)
1165 .visit_with(&mut ProhibitOpaqueTypes)
1168 self.emit_ffi_unsafe_type_lint(ty, sp, fluent::lint_improper_ctypes_opaque, None);
// Top-level per-type driver: handles opaque types, normalization, the
// top-level-array special case and unit returns, then reports whatever
// `check_type_for_ffi` finds.
// NOTE(review): the `sp`/`ty`/`is_static` parameters are not visible in
// this excerpt.
1175 fn check_type_for_ffi_and_report_errors(
1180 is_return_type: bool,
1182 // We have to check for opaque types before `normalize_erasing_regions`,
1183 // which will replace opaque types with their underlying concrete type.
1184 if self.check_for_opaque_ty(sp, ty) {
1185 // We've already emitted an error due to an opaque type.
1189 // it is only OK to use this function because extern fns cannot have
1190 // any generic types right now:
1191 let ty = self.cx.tcx.normalize_erasing_regions(self.cx.param_env, ty);
1193 // C doesn't really support passing arrays by value - the only way to pass an array by value
1194 // is through a struct. So, first test that the top level isn't an array, and then
1195 // recursively check the types inside.
1196 if !is_static && self.check_for_array_ty(sp, ty) {
1200 // Don't report FFI errors for unit return types. This check exists here, and not in
1201 // `check_foreign_fn` (where it would make more sense) so that normalization has definitely
1203 if is_return_type && ty.is_unit() {
// Fresh cache per top-level type; report per the classification.
1207 match self.check_type_for_ffi(&mut FxHashSet::default(), ty) {
1208 FfiResult::FfiSafe => {}
1209 FfiResult::FfiPhantom(ty) => {
1210 self.emit_ffi_unsafe_type_lint(
1213 fluent::lint_improper_ctypes_only_phantomdata,
1217 // If `ty` is a `repr(transparent)` newtype, and the non-zero-sized type is a generic
1218 // argument, which after substitution, is `()`, then this branch can be hit.
1219 FfiResult::FfiUnsafe { ty, .. } if is_return_type && ty.is_unit() => {}
1220 FfiResult::FfiUnsafe { ty, reason, help } => {
1221 self.emit_ffi_unsafe_type_lint(ty, sp, reason, help);
// Checks every parameter type and the (non-defaulted) return type of a
// foreign/extern function signature for FFI safety.
1226 fn check_foreign_fn(&mut self, id: hir::HirId, decl: &hir::FnDecl<'_>) {
1227 let def_id = self.cx.tcx.hir().local_def_id(id);
// Use the type-level signature, with late-bound regions erased, paired
// with the HIR declaration for spans.
1228 let sig = self.cx.tcx.bound_fn_sig(def_id.into()).subst_identity();
1229 let sig = self.cx.tcx.erase_late_bound_regions(sig);
1231 for (input_ty, input_hir) in iter::zip(sig.inputs(), decl.inputs) {
1232 self.check_type_for_ffi_and_report_errors(input_hir.span, *input_ty, false, false);
1235 if let hir::FnRetTy::Return(ref ret_hir) = decl.output {
1236 let ret_ty = sig.output();
1237 self.check_type_for_ffi_and_report_errors(ret_hir.span, ret_ty, false, true);
// Checks the type of a foreign `static` item (is_static = true so that a
// top-level array type is permitted).
1241 fn check_foreign_static(&mut self, id: hir::HirId, span: Span) {
1242 let def_id = self.cx.tcx.hir().local_def_id(id);
1243 let ty = self.cx.tcx.type_of(def_id);
1244 self.check_type_for_ffi_and_report_errors(span, ty, true, false);
// Whether this ABI is a Rust-internal one, to which the improper-ctypes
// checks do not apply.
1247 fn is_internal_abi(&self, abi: SpecAbi) -> bool {
1250 SpecAbi::Rust | SpecAbi::RustCall | SpecAbi::RustIntrinsic | SpecAbi::PlatformIntrinsic
1255 impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDeclarations {
// For each item in a non-Rust-ABI extern block, run the FFI checks in
// Declaration mode; foreign `type` items are skipped.
1256 fn check_foreign_item(&mut self, cx: &LateContext<'_>, it: &hir::ForeignItem<'_>) {
1257 let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Declaration };
1258 let abi = cx.tcx.hir().get_foreign_abi(it.hir_id());
1260 if !vis.is_internal_abi(abi) {
1262 hir::ForeignItemKind::Fn(ref decl, _, _) => {
1263 vis.check_foreign_fn(it.hir_id(), decl);
1265 hir::ForeignItemKind::Static(ref ty, _) => {
1266 vis.check_foreign_static(it.hir_id(), ty.span);
1268 hir::ForeignItemKind::Type => (),
1274 impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDefinitions {
// Runs the FFI checks in Definition mode on `extern "<abi>" fn` items and
// methods defined in Rust with a non-internal ABI.
1277 cx: &LateContext<'tcx>,
1278 kind: hir::intravisit::FnKind<'tcx>,
1279 decl: &'tcx hir::FnDecl<'_>,
1280 _: &'tcx hir::Body<'_>,
1284 use hir::intravisit::FnKind;
// Closures (the remaining FnKind) have no explicit ABI to check.
1286 let abi = match kind {
1287 FnKind::ItemFn(_, _, header, ..) => header.abi,
1288 FnKind::Method(_, sig, ..) => sig.header.abi,
1292 let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Definition };
1293 if !vis.is_internal_abi(abi) {
1294 vis.check_foreign_fn(hir_id, decl);
1299 declare_lint_pass!(VariantSizeDifferences => [VARIANT_SIZE_DIFFERENCES]);
1301 impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences {
// For each enum with a directly-tagged multi-variant layout, warns when the
// largest variant is more than 3x the size of the second-largest.
1302 fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
1303 if let hir::ItemKind::Enum(ref enum_definition, _) = it.kind {
1304 let t = cx.tcx.type_of(it.owner_id);
1305 let ty = cx.tcx.erase_regions(t);
1306 let Ok(layout) = cx.layout_of(ty) else { return };
// Only direct tag encodings are measured; niche-encoded enums are skipped.
1307 let Variants::Multiple {
1308 tag_encoding: TagEncoding::Direct, tag, ref variants, ..
1309 } = &layout.variants else {
1313 let tag_size = tag.size(&cx.tcx).bytes();
1316 "enum `{}` is {} bytes large with layout:\n{:#?}",
1318 layout.size.bytes(),
// Fold over per-variant payload sizes (tag excluded), tracking the
// largest, second-largest, and the largest variant's index.
1322 let (largest, slargest, largest_index) = iter::zip(enum_definition.variants, variants)
1323 .map(|(variant, variant_layout)| {
1324 // Subtract the size of the enum tag.
1325 let bytes = variant_layout.size.bytes().saturating_sub(tag_size);
1327 debug!("- variant `{}` is {} bytes large", variant.ident, bytes);
1331 .fold((0, 0, 0), |(l, s, li), (idx, size)| {
1334 } else if size > s {
1341 // We only warn if the largest variant is at least thrice as large as
1342 // the second-largest.
1343 if largest > slargest * 3 && slargest > 0 {
1344 cx.emit_spanned_lint(
1345 VARIANT_SIZE_DIFFERENCES,
1346 enum_definition.variants[largest_index].span,
1347 VariantSizeDifferencesDiag { largest },
1355 /// The `invalid_atomic_ordering` lint detects passing an `Ordering`
1356 /// to an atomic operation that does not support that ordering.
1360 /// ```rust,compile_fail
1361 /// # use core::sync::atomic::{AtomicU8, Ordering};
1362 /// let atom = AtomicU8::new(0);
1363 /// let value = atom.load(Ordering::Release);
1364 /// # let _ = value;
1371 /// Some atomic operations are only supported for a subset of the
1372 /// `atomic::Ordering` variants. Passing an unsupported variant will cause
1373 /// an unconditional panic at runtime, which is detected by this lint.
1375 /// This lint will trigger in the following cases: (where `AtomicType` is an
1376 /// atomic type from `core::sync::atomic`, such as `AtomicBool`,
1377 /// `AtomicPtr`, `AtomicUsize`, or any of the other integer atomics).
1379 /// - Passing `Ordering::Acquire` or `Ordering::AcqRel` to
1380 /// `AtomicType::store`.
1382 /// - Passing `Ordering::Release` or `Ordering::AcqRel` to
1383 /// `AtomicType::load`.
1385 /// - Passing `Ordering::Relaxed` to `core::sync::atomic::fence` or
1386 /// `core::sync::atomic::compiler_fence`.
1388 /// - Passing `Ordering::Release` or `Ordering::AcqRel` as the failure
1389 /// ordering for any of `AtomicType::compare_exchange`,
1390 /// `AtomicType::compare_exchange_weak`, or `AtomicType::fetch_update`.
1391 INVALID_ATOMIC_ORDERING,
1393 "usage of invalid atomic ordering in atomic operations and memory fences"
// Registers the `InvalidAtomicOrdering` pass as the provider of the
// INVALID_ATOMIC_ORDERING lint declared just above.
1396 declare_lint_pass!(InvalidAtomicOrdering => [INVALID_ATOMIC_ORDERING]);
1398 impl InvalidAtomicOrdering {
// Recognizes `expr` as an inherent method call (e.g. `atom.load(..)`) on one of
// the standard atomic types, returning the method name and its arguments.
// Returns None for anything else, including trait-provided extension methods.
1399 fn inherent_atomic_method_call<'hir>(
1400 cx: &LateContext<'_>,
// Fast path: bail before any query work unless the method *name* matches.
1402 recognized_names: &[Symbol], // used for fast path calculation
1403 ) -> Option<(Symbol, &'hir [Expr<'hir>])> {
// NOTE(review): the contents of this const (orig 1405-1419) are elided from
// this view — presumably AtomicBool/AtomicPtr/Atomic{I,U}{8..} symbols.
1404 const ATOMIC_TYPES: &[Symbol] = &[
1420 if let ExprKind::MethodCall(ref method_path, _, args, _) = &expr.kind
1421 && recognized_names.contains(&method_path.ident.name)
// Resolve the method call to its definition, then to the impl it lives in.
1422 && let Some(m_def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id)
1423 && let Some(impl_did) = cx.tcx.impl_of_method(m_def_id)
1424 && let Some(adt) = cx.tcx.type_of(impl_did).ty_adt_def()
1425 // skip extension traits, only lint functions from the standard library
1426 && cx.tcx.trait_id_of_impl(impl_did).is_none()
// The receiver ADT must live directly in the `core::sync::atomic` module
// and be one of the known atomic type names.
1427 && let parent = cx.tcx.parent(adt.did())
1428 && cx.tcx.is_diagnostic_item(sym::atomic_mod, parent)
1429 && ATOMIC_TYPES.contains(&cx.tcx.item_name(adt.did()))
1431 return Some((method_path.ident.name, args));
// Resolves an argument expression to one of the five `atomic::Ordering`
// variants, or None if it is not a literal path to an Ordering variant
// (e.g. a local variable — those are intentionally not linted).
1436 fn match_ordering(cx: &LateContext<'_>, ord_arg: &Expr<'_>) -> Option<Symbol> {
1437 let ExprKind::Path(ref ord_qpath) = ord_arg.kind else { return None };
1438 let did = cx.qpath_res(ord_qpath, ord_arg.hir_id).opt_def_id()?;
// NOTE(review): `let tcx = cx.tcx;` (orig 1439) is elided from this view.
1440 let atomic_ordering = tcx.get_diagnostic_item(sym::Ordering);
1441 let name = tcx.item_name(did);
1442 let parent = tcx.parent(did);
// Accept only the five known variant names whose parent is the Ordering enum;
// for tuple-struct-like ctors the enum is one more level up, hence opt_parent.
1443 [sym::Relaxed, sym::Release, sym::Acquire, sym::AcqRel, sym::SeqCst].into_iter().find(
1446 && (Some(parent) == atomic_ordering
1447 // needed in case this is a ctor, not a variant
1448 || tcx.opt_parent(parent) == atomic_ordering)
// Lints `AtomicType::load` called with Release/AcqRel and `AtomicType::store`
// called with Acquire/AcqRel — orderings that panic at runtime.
1453 fn check_atomic_load_store(cx: &LateContext<'_>, expr: &Expr<'_>) {
1454 if let Some((method, args)) = Self::inherent_atomic_method_call(cx, expr, &[sym::load, sym::store])
// The ordering argument position differs: load(order) vs store(value, order).
// Each method has exactly one uniquely-invalid one-way ordering besides AcqRel.
1455 && let Some((ordering_arg, invalid_ordering)) = match method {
1456 sym::load => Some((&args[0], sym::Release)),
1457 sym::store => Some((&args[1], sym::Acquire)),
1460 && let Some(ordering) = Self::match_ordering(cx, ordering_arg)
// AcqRel is invalid for both load and store.
1461 && (ordering == invalid_ordering || ordering == sym::AcqRel)
1463 if method == sym::load {
1464 cx.emit_spanned_lint(INVALID_ATOMIC_ORDERING, ordering_arg.span, AtomicOrderingLoad);
1466 cx.emit_spanned_lint(INVALID_ATOMIC_ORDERING, ordering_arg.span, AtomicOrderingStore);
// Lints `fence(Relaxed)` / `compiler_fence(Relaxed)`: a Relaxed fence is a
// no-op-with-a-panic in std, so it is always a bug.
1471 fn check_memory_fence(cx: &LateContext<'_>, expr: &Expr<'_>) {
1472 if let ExprKind::Call(ref func, ref args) = expr.kind
1473 && let ExprKind::Path(ref func_qpath) = func.kind
1474 && let Some(def_id) = cx.qpath_res(func_qpath, func.hir_id).opt_def_id()
// Matched via diagnostic items so renames/re-exports in std don't break the lint.
1475 && matches!(cx.tcx.get_diagnostic_name(def_id), Some(sym::fence | sym::compiler_fence))
// Both fence functions take the ordering as their only argument.
1476 && Self::match_ordering(cx, &args[0]) == Some(sym::Relaxed)
1478 cx.emit_spanned_lint(INVALID_ATOMIC_ORDERING, args[0].span, AtomicOrderingFence);
// Lints Release/AcqRel used as the *failure* ordering of compare_exchange,
// compare_exchange_weak, or fetch_update — failure orderings may not contain
// a release component.
1482 fn check_atomic_compare_exchange(cx: &LateContext<'_>, expr: &Expr<'_>) {
1483 let Some((method, args)) = Self::inherent_atomic_method_call(cx, expr, &[sym::fetch_update, sym::compare_exchange, sym::compare_exchange_weak])
// Failure-ordering argument position differs per method:
// fetch_update(set, fail, f) vs compare_exchange(current, new, success, fail).
1486 let fail_order_arg = match method {
1487 sym::fetch_update => &args[1],
1488 sym::compare_exchange | sym::compare_exchange_weak => &args[3],
// Non-path orderings (locals, computed values) are deliberately not linted.
1492 let Some(fail_ordering) = Self::match_ordering(cx, fail_order_arg) else { return };
1494 if matches!(fail_ordering, sym::Release | sym::AcqRel) {
1495 cx.emit_spanned_lint(
1496 INVALID_ATOMIC_ORDERING,
1497 fail_order_arg.span,
// Span passed twice: once for placement, once inside the diagnostic struct
// (presumably used for a suggestion/label) — TODO confirm in the diag type.
1498 InvalidAtomicOrderingDiag { method, fail_order_arg_span: fail_order_arg.span },
// Lint pass entry point: every expression is run through the three independent
// atomic-ordering checks; each one is a no-op unless its pattern matches.
1504 impl<'tcx> LateLintPass<'tcx> for InvalidAtomicOrdering {
1505 fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
1506 Self::check_atomic_load_store(cx, expr);
1507 Self::check_memory_fence(cx, expr);
1508 Self::check_atomic_compare_exchange(cx, expr);