2 AtomicOrderingFence, AtomicOrderingLoad, AtomicOrderingStore, ImproperCTypes,
3 InvalidAtomicOrderingDiag, OnlyCastu8ToChar, OverflowingBinHex, OverflowingBinHexSign,
4 OverflowingBinHexSub, OverflowingInt, OverflowingIntHelp, OverflowingLiteral, OverflowingUInt,
5 RangeEndpointOutOfRange, UnusedComparisons, VariantSizeDifferencesDiag,
7 use crate::{LateContext, LateLintPass, LintContext};
9 use rustc_attr as attr;
10 use rustc_data_structures::fx::FxHashSet;
11 use rustc_errors::{fluent, DiagnosticMessage};
13 use rustc_hir::{is_range_literal, Expr, ExprKind, Node};
14 use rustc_middle::ty::layout::{IntegerExt, LayoutOf, SizeSkeleton};
15 use rustc_middle::ty::subst::SubstsRef;
16 use rustc_middle::ty::{self, AdtKind, DefIdTree, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable};
17 use rustc_span::def_id::LocalDefId;
18 use rustc_span::source_map;
19 use rustc_span::symbol::sym;
20 use rustc_span::{Span, Symbol};
21 use rustc_target::abi::{Abi, Size, WrappingRange};
22 use rustc_target::abi::{Integer, TagEncoding, Variants};
23 use rustc_target::spec::abi::Abi as SpecAbi;
26 use std::ops::ControlFlow;
29 /// The `unused_comparisons` lint detects comparisons made useless by
30 /// limits of the types involved.
44 /// A useless comparison may indicate a mistake, and should be fixed or
48 "comparisons made useless by limits of the types involved"
52 /// The `overflowing_literals` lint detects literal out of range for its
57 /// ```rust,compile_fail
65 /// It is usually a mistake to use a literal that overflows the type where
66 /// it is used. Either use a literal that is within range, or change the
67 /// type to be within the range of the literal.
70 "literal out of range for its type"
74 /// The `variant_size_differences` lint detects enums with widely varying
79 /// ```rust,compile_fail
80 /// #![deny(variant_size_differences)]
91 /// It can be a mistake to add a variant to an enum that is much larger
92 /// than the other variants, bloating the overall size required for all
93 /// variants. This can impact performance and memory usage. This is
94 /// triggered if one variant is more than 3 times larger than the
95 /// second-largest variant.
97 /// Consider placing the large variant's contents on the heap (for example
98 /// via [`Box`]) to keep the overall size of the enum itself down.
100 /// This lint is "allow" by default because it can be noisy, and may not be
101 /// an actual problem. Decisions about this should be guided with
102 /// profiling and benchmarking.
104 /// [`Box`]: https://doc.rust-lang.org/std/boxed/index.html
105 VARIANT_SIZE_DIFFERENCES,
107 "detects enums with widely varying variant sizes"
110 #[derive(Copy, Clone)]
111 pub struct TypeLimits {
112 /// Id of the last visited negated expression
113 negated_expr_id: Option<hir::HirId>,
116 impl_lint_pass!(TypeLimits => [UNUSED_COMPARISONS, OVERFLOWING_LITERALS]);
119 pub fn new() -> TypeLimits {
120 TypeLimits { negated_expr_id: None }
124 /// Attempts to special-case the overflowing literal lint when it occurs as a range endpoint (`expr..MAX+1`).
125 /// Returns `true` iff the lint was emitted.
126 fn lint_overflowing_range_endpoint<'tcx>(
127 cx: &LateContext<'tcx>,
131 expr: &'tcx hir::Expr<'tcx>,
134 // We only want to handle exclusive (`..`) ranges,
135 // which are represented as `ExprKind::Struct`.
136 let par_id = cx.tcx.hir().parent_id(expr.hir_id);
137 let Node::ExprField(field) = cx.tcx.hir().get(par_id) else { return false };
138 let Node::Expr(struct_expr) = cx.tcx.hir().get_parent(field.hir_id) else { return false };
139 if !is_range_literal(struct_expr) {
142 let ExprKind::Struct(_, eps, _) = &struct_expr.kind else { return false };
147 // We can suggest using an inclusive range
148 // (`..=`) instead only if it is the `end` that is
149 // overflowing and only by 1.
150 if !(eps[1].expr.hir_id == expr.hir_id && lit_val - 1 == max) {
153 let Ok(start) = cx.sess().source_map().span_to_snippet(eps[0].span) else { return false };
155 use rustc_ast::{LitIntType, LitKind};
156 let suffix = match lit.node {
157 LitKind::Int(_, LitIntType::Signed(s)) => s.name_str(),
158 LitKind::Int(_, LitIntType::Unsigned(s)) => s.name_str(),
159 LitKind::Int(_, LitIntType::Unsuffixed) => "",
162 cx.emit_spanned_lint(
163 OVERFLOWING_LITERALS,
165 RangeEndpointOutOfRange {
167 suggestion: struct_expr.span,
169 literal: lit_val - 1,
174 // We've just emitted a lint, special cased for `(...)..MAX+1` ranges,
175 // return `true` so the callers don't also emit a lint
179 // For `isize` & `usize`, be conservative with the warnings, so that the
180 // warnings are consistent between 32- and 64-bit platforms.
181 fn int_ty_range(int_ty: ty::IntTy) -> (i128, i128) {
183 ty::IntTy::Isize => (i64::MIN.into(), i64::MAX.into()),
184 ty::IntTy::I8 => (i8::MIN.into(), i8::MAX.into()),
185 ty::IntTy::I16 => (i16::MIN.into(), i16::MAX.into()),
186 ty::IntTy::I32 => (i32::MIN.into(), i32::MAX.into()),
187 ty::IntTy::I64 => (i64::MIN.into(), i64::MAX.into()),
188 ty::IntTy::I128 => (i128::MIN, i128::MAX),
192 fn uint_ty_range(uint_ty: ty::UintTy) -> (u128, u128) {
193 let max = match uint_ty {
194 ty::UintTy::Usize => u64::MAX.into(),
195 ty::UintTy::U8 => u8::MAX.into(),
196 ty::UintTy::U16 => u16::MAX.into(),
197 ty::UintTy::U32 => u32::MAX.into(),
198 ty::UintTy::U64 => u64::MAX.into(),
199 ty::UintTy::U128 => u128::MAX,
204 fn get_bin_hex_repr(cx: &LateContext<'_>, lit: &hir::Lit) -> Option<String> {
205 let src = cx.sess().source_map().span_to_snippet(lit.span).ok()?;
206 let firstch = src.chars().next()?;
209 match src.chars().nth(1) {
210 Some('x' | 'b') => return Some(src),
218 fn report_bin_hex_error(
219 cx: &LateContext<'_>,
220 expr: &hir::Expr<'_>,
227 let (t, actually) = match ty {
228 attr::IntType::SignedInt(t) => {
229 let actually = if negative {
230 -(size.sign_extend(val) as i128)
232 size.sign_extend(val) as i128
234 (t.name_str(), actually.to_string())
236 attr::IntType::UnsignedInt(t) => {
237 let actually = size.truncate(val);
238 (t.name_str(), actually.to_string())
242 if negative { OverflowingBinHexSign::Negative } else { OverflowingBinHexSign::Positive };
243 let sub = get_type_suggestion(cx.typeck_results().node_type(expr.hir_id), val, negative).map(
245 if let Some(pos) = repr_str.chars().position(|c| c == 'i' || c == 'u') {
246 let (sans_suffix, _) = repr_str.split_at(pos);
247 OverflowingBinHexSub::Suggestion { span: expr.span, suggestion_ty, sans_suffix }
249 OverflowingBinHexSub::Help { suggestion_ty }
253 cx.emit_spanned_lint(
254 OVERFLOWING_LITERALS,
256 OverflowingBinHex { ty: t, lit: repr_str.clone(), dec: val, actually, sign, sub },
260 // This function finds the next fitting type and generates a suggestion string.
261 // It searches for fitting types in the following way (`X < Y`):
262 // - `iX`: if literal fits in `uX` => `uX`, else => `iY`
266 // No suggestion for: `isize`, `usize`.
267 fn get_type_suggestion(t: Ty<'_>, val: u128, negative: bool) -> Option<&'static str> {
270 macro_rules! find_fit {
271 ($ty:expr, $val:expr, $negative:expr,
272 $($type:ident => [$($utypes:expr),*] => [$($itypes:expr),*]),+) => {
274 let _neg = if negative { 1 } else { 0 };
277 $(if !negative && val <= uint_ty_range($utypes).1 {
278 return Some($utypes.name_str())
280 $(if val <= int_ty_range($itypes).1 as u128 + _neg {
281 return Some($itypes.name_str())
291 ty::Int(i) => find_fit!(i, val, negative,
292 I8 => [U8] => [I16, I32, I64, I128],
293 I16 => [U16] => [I32, I64, I128],
294 I32 => [U32] => [I64, I128],
295 I64 => [U64] => [I128],
296 I128 => [U128] => []),
297 ty::Uint(u) => find_fit!(u, val, negative,
298 U8 => [U8, U16, U32, U64, U128] => [],
299 U16 => [U16, U32, U64, U128] => [],
300 U32 => [U32, U64, U128] => [],
301 U64 => [U64, U128] => [],
302 U128 => [U128] => []),
307 fn lint_int_literal<'tcx>(
308 cx: &LateContext<'tcx>,
309 type_limits: &TypeLimits,
310 e: &'tcx hir::Expr<'tcx>,
315 let int_type = t.normalize(cx.sess().target.pointer_width);
316 let (min, max) = int_ty_range(int_type);
317 let max = max as u128;
318 let negative = type_limits.negated_expr_id == Some(e.hir_id);
320 // Detect literal value out of range [min, max] inclusive
321 // avoiding use of -min to prevent overflow/panic
322 if (negative && v > max + 1) || (!negative && v > max) {
323 if let Some(repr_str) = get_bin_hex_repr(cx, lit) {
324 report_bin_hex_error(
327 attr::IntType::SignedInt(ty::ast_int_ty(t)),
328 Integer::from_int_ty(cx, t).size(),
336 if lint_overflowing_range_endpoint(cx, lit, v, max, e, t.name_str()) {
337 // The overflowing literal lint was emitted by `lint_overflowing_range_endpoint`.
344 .span_to_snippet(lit.span)
345 .expect("must get snippet from literal");
346 let help = get_type_suggestion(cx.typeck_results().node_type(e.hir_id), v, negative)
347 .map(|suggestion_ty| OverflowingIntHelp { suggestion_ty });
349 cx.emit_spanned_lint(
350 OVERFLOWING_LITERALS,
352 OverflowingInt { ty: t.name_str(), lit, min, max, help },
357 fn lint_uint_literal<'tcx>(
358 cx: &LateContext<'tcx>,
359 e: &'tcx hir::Expr<'tcx>,
363 let uint_type = t.normalize(cx.sess().target.pointer_width);
364 let (min, max) = uint_ty_range(uint_type);
365 let lit_val: u128 = match lit.node {
366 // _v is u8, within range by definition
367 ast::LitKind::Byte(_v) => return,
368 ast::LitKind::Int(v, _) => v,
371 if lit_val < min || lit_val > max {
372 let parent_id = cx.tcx.hir().parent_id(e.hir_id);
373 if let Node::Expr(par_e) = cx.tcx.hir().get(parent_id) {
375 hir::ExprKind::Cast(..) => {
376 if let ty::Char = cx.typeck_results().expr_ty(par_e).kind() {
377 cx.emit_spanned_lint(
378 OVERFLOWING_LITERALS,
380 OnlyCastu8ToChar { span: par_e.span, literal: lit_val },
388 if lint_overflowing_range_endpoint(cx, lit, lit_val, max, e, t.name_str()) {
389 // The overflowing literal lint was emitted by `lint_overflowing_range_endpoint`.
392 if let Some(repr_str) = get_bin_hex_repr(cx, lit) {
393 report_bin_hex_error(
396 attr::IntType::UnsignedInt(ty::ast_uint_ty(t)),
397 Integer::from_uint_ty(cx, t).size(),
404 cx.emit_spanned_lint(
405 OVERFLOWING_LITERALS,
412 .span_to_snippet(lit.span)
413 .expect("must get snippet from literal"),
421 fn lint_literal<'tcx>(
422 cx: &LateContext<'tcx>,
423 type_limits: &TypeLimits,
424 e: &'tcx hir::Expr<'tcx>,
427 match *cx.typeck_results().node_type(e.hir_id).kind() {
430 ast::LitKind::Int(v, ast::LitIntType::Signed(_) | ast::LitIntType::Unsuffixed) => {
431 lint_int_literal(cx, type_limits, e, lit, t, v)
436 ty::Uint(t) => lint_uint_literal(cx, e, lit, t),
438 let is_infinite = match lit.node {
439 ast::LitKind::Float(v, _) => match t {
440 ty::FloatTy::F32 => v.as_str().parse().map(f32::is_infinite),
441 ty::FloatTy::F64 => v.as_str().parse().map(f64::is_infinite),
445 if is_infinite == Ok(true) {
446 cx.emit_spanned_lint(
447 OVERFLOWING_LITERALS,
454 .span_to_snippet(lit.span)
455 .expect("must get snippet from literal"),
464 impl<'tcx> LateLintPass<'tcx> for TypeLimits {
465 fn check_expr(&mut self, cx: &LateContext<'tcx>, e: &'tcx hir::Expr<'tcx>) {
467 hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => {
468 // propagate negation, if the negation itself isn't negated
469 if self.negated_expr_id != Some(e.hir_id) {
470 self.negated_expr_id = Some(expr.hir_id);
473 hir::ExprKind::Binary(binop, ref l, ref r) => {
474 if is_comparison(binop) && !check_limits(cx, binop, &l, &r) {
475 cx.emit_spanned_lint(UNUSED_COMPARISONS, e.span, UnusedComparisons);
478 hir::ExprKind::Lit(ref lit) => lint_literal(cx, self, e, lit),
482 fn is_valid<T: PartialOrd>(binop: hir::BinOp, v: T, min: T, max: T) -> bool {
484 hir::BinOpKind::Lt => v > min && v <= max,
485 hir::BinOpKind::Le => v >= min && v < max,
486 hir::BinOpKind::Gt => v >= min && v < max,
487 hir::BinOpKind::Ge => v > min && v <= max,
488 hir::BinOpKind::Eq | hir::BinOpKind::Ne => v >= min && v <= max,
493 fn rev_binop(binop: hir::BinOp) -> hir::BinOp {
497 hir::BinOpKind::Lt => hir::BinOpKind::Gt,
498 hir::BinOpKind::Le => hir::BinOpKind::Ge,
499 hir::BinOpKind::Gt => hir::BinOpKind::Lt,
500 hir::BinOpKind::Ge => hir::BinOpKind::Le,
507 cx: &LateContext<'_>,
512 let (lit, expr, swap) = match (&l.kind, &r.kind) {
513 (&hir::ExprKind::Lit(_), _) => (l, r, true),
514 (_, &hir::ExprKind::Lit(_)) => (r, l, false),
517 // Normalize the binop so that the literal is always on the RHS in
519 let norm_binop = if swap { rev_binop(binop) } else { binop };
520 match *cx.typeck_results().node_type(expr.hir_id).kind() {
522 let (min, max) = int_ty_range(int_ty);
523 let lit_val: i128 = match lit.kind {
524 hir::ExprKind::Lit(ref li) => match li.node {
527 ast::LitIntType::Signed(_) | ast::LitIntType::Unsuffixed,
533 is_valid(norm_binop, lit_val, min, max)
535 ty::Uint(uint_ty) => {
536 let (min, max): (u128, u128) = uint_ty_range(uint_ty);
537 let lit_val: u128 = match lit.kind {
538 hir::ExprKind::Lit(ref li) => match li.node {
539 ast::LitKind::Int(v, _) => v,
544 is_valid(norm_binop, lit_val, min, max)
550 fn is_comparison(binop: hir::BinOp) -> bool {
565 /// The `improper_ctypes` lint detects incorrect use of types in foreign
572 /// static STATIC: String;
580 /// The compiler has several checks to verify that types used in `extern`
581 /// blocks are safe and follow certain rules to ensure proper
582 /// compatibility with the foreign interfaces. This lint is issued when it
583 /// detects a probable mistake in a definition. The lint usually should
584 /// provide a description of the issue, along with possibly a hint on how
588 "proper use of libc types in foreign modules"
591 declare_lint_pass!(ImproperCTypesDeclarations => [IMPROPER_CTYPES]);
594 /// The `improper_ctypes_definitions` lint detects incorrect use of
595 /// [`extern` function] definitions.
597 /// [`extern` function]: https://doc.rust-lang.org/reference/items/functions.html#extern-function-qualifier
602 /// # #![allow(unused)]
603 /// pub extern "C" fn str_type(p: &str) { }
610 /// There are many parameter and return types that may be specified in an
611 /// `extern` function that are not compatible with the given ABI. This
612 /// lint is an alert that these types should not be used. The lint usually
613 /// should provide a description of the issue, along with possibly a hint
614 /// on how to resolve it.
615 IMPROPER_CTYPES_DEFINITIONS,
617 "proper use of libc types in foreign item definitions"
620 declare_lint_pass!(ImproperCTypesDefinitions => [IMPROPER_CTYPES_DEFINITIONS]);
622 #[derive(Clone, Copy)]
623 pub(crate) enum CItemKind {
628 struct ImproperCTypesVisitor<'a, 'tcx> {
629 cx: &'a LateContext<'tcx>,
633 enum FfiResult<'tcx> {
635 FfiPhantom(Ty<'tcx>),
636 FfiUnsafe { ty: Ty<'tcx>, reason: DiagnosticMessage, help: Option<DiagnosticMessage> },
639 pub(crate) fn nonnull_optimization_guaranteed<'tcx>(
641 def: ty::AdtDef<'tcx>,
643 tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed)
646 /// `repr(transparent)` structs can have a single non-ZST field, this function returns that
648 pub fn transparent_newtype_field<'a, 'tcx>(
650 variant: &'a ty::VariantDef,
651 ) -> Option<&'a ty::FieldDef> {
652 let param_env = tcx.param_env(variant.def_id);
653 variant.fields.iter().find(|field| {
654 let field_ty = tcx.type_of(field.did);
655 let is_zst = tcx.layout_of(param_env.and(field_ty)).map_or(false, |layout| layout.is_zst());
660 /// Is type known to be non-null?
661 fn ty_is_known_nonnull<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, mode: CItemKind) -> bool {
664 ty::FnPtr(_) => true,
666 ty::Adt(def, _) if def.is_box() && matches!(mode, CItemKind::Definition) => true,
667 ty::Adt(def, substs) if def.repr().transparent() && !def.is_union() => {
668 let marked_non_null = nonnull_optimization_guaranteed(tcx, *def);
674 // `UnsafeCell` has its niche hidden.
675 if def.is_unsafe_cell() {
681 .filter_map(|variant| transparent_newtype_field(cx.tcx, variant))
682 .any(|field| ty_is_known_nonnull(cx, field.ty(tcx, substs), mode))
688 /// Given a non-null scalar (or transparent) type `ty`, return the nullable version of that type.
689 /// If the type passed in was not scalar, returns None.
690 fn get_nullable_type<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
692 Some(match *ty.kind() {
693 ty::Adt(field_def, field_substs) => {
694 let inner_field_ty = {
695 let mut first_non_zst_ty = field_def
698 .filter_map(|v| transparent_newtype_field(cx.tcx, v));
700 first_non_zst_ty.clone().count(),
702 "Wrong number of fields for transparent type"
706 .expect("No non-zst fields in transparent type.")
707 .ty(tcx, field_substs)
709 return get_nullable_type(cx, inner_field_ty);
711 ty::Int(ty) => tcx.mk_mach_int(ty),
712 ty::Uint(ty) => tcx.mk_mach_uint(ty),
713 ty::RawPtr(ty_mut) => tcx.mk_ptr(ty_mut),
714 // As these types are always non-null, the nullable equivalent of
715 // Option<T> of these types are their raw pointer counterparts.
716 ty::Ref(_region, ty, mutbl) => tcx.mk_ptr(ty::TypeAndMut { ty, mutbl }),
718 // There is no nullable equivalent for Rust's function pointers -- you
719 // must use an Option<fn(..) -> _> to represent it.
723 // We should only ever reach this case if ty_is_known_nonnull is extended
727 "get_nullable_type: Unhandled scalar kind: {:?} while checking {:?}",
735 /// Check if this enum can be safely exported based on the "nullable pointer optimization". If it
736 /// can, return the type that `ty` can be safely converted to, otherwise return `None`.
737 /// Currently restricted to function pointers, boxes, references, `core::num::NonZero*`,
738 /// `core::ptr::NonNull`, and `#[repr(transparent)]` newtypes.
739 /// FIXME: This duplicates code in codegen.
740 pub(crate) fn repr_nullable_ptr<'tcx>(
741 cx: &LateContext<'tcx>,
744 ) -> Option<Ty<'tcx>> {
745 debug!("is_repr_nullable_ptr(cx, ty = {:?})", ty);
746 if let ty::Adt(ty_def, substs) = ty.kind() {
747 let field_ty = match &ty_def.variants().raw[..] {
748 [var_one, var_two] => match (&var_one.fields[..], &var_two.fields[..]) {
749 ([], [field]) | ([field], []) => field.ty(cx.tcx, substs),
755 if !ty_is_known_nonnull(cx, field_ty, ckind) {
759 // At this point, the field's type is known to be nonnull and the parent enum is Option-like.
760 // If the computed size for the field and the enum are different, the nonnull optimization isn't
761 // being applied (and we've got a problem somewhere).
762 let compute_size_skeleton = |t| SizeSkeleton::compute(t, cx.tcx, cx.param_env).unwrap();
763 if !compute_size_skeleton(ty).same_size(compute_size_skeleton(field_ty)) {
764 bug!("improper_ctypes: Option nonnull optimization not applied?");
767 // Return the nullable type this Option-like enum can be safely represented with.
768 let field_ty_abi = &cx.layout_of(field_ty).unwrap().abi;
769 if let Abi::Scalar(field_ty_scalar) = field_ty_abi {
770 match field_ty_scalar.valid_range(cx) {
771 WrappingRange { start: 0, end }
772 if end == field_ty_scalar.size(&cx.tcx).unsigned_int_max() - 1 =>
774 return Some(get_nullable_type(cx, field_ty).unwrap());
776 WrappingRange { start: 1, .. } => {
777 return Some(get_nullable_type(cx, field_ty).unwrap());
779 WrappingRange { start, end } => {
780 unreachable!("Unhandled start and end range: ({}, {})", start, end)
788 impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
789 /// Check if the type is array and emit an unsafe type lint.
790 fn check_for_array_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool {
791 if let ty::Array(..) = ty.kind() {
792 self.emit_ffi_unsafe_type_lint(
795 fluent::lint_improper_ctypes_array_reason,
796 Some(fluent::lint_improper_ctypes_array_help),
804 /// Checks if the given field's type is "ffi-safe".
805 fn check_field_type_for_ffi(
807 cache: &mut FxHashSet<Ty<'tcx>>,
808 field: &ty::FieldDef,
809 substs: SubstsRef<'tcx>,
810 ) -> FfiResult<'tcx> {
811 let field_ty = field.ty(self.cx.tcx, substs);
812 if field_ty.has_opaque_types() {
813 self.check_type_for_ffi(cache, field_ty)
815 let field_ty = self.cx.tcx.normalize_erasing_regions(self.cx.param_env, field_ty);
816 self.check_type_for_ffi(cache, field_ty)
820 /// Checks if the given `VariantDef`'s field types are "ffi-safe".
821 fn check_variant_for_ffi(
823 cache: &mut FxHashSet<Ty<'tcx>>,
825 def: ty::AdtDef<'tcx>,
826 variant: &ty::VariantDef,
827 substs: SubstsRef<'tcx>,
828 ) -> FfiResult<'tcx> {
831 let transparent_safety = def.repr().transparent().then(|| {
832 // Can assume that at most one field is not a ZST, so only check
833 // that field's type for FFI-safety.
834 if let Some(field) = transparent_newtype_field(self.cx.tcx, variant) {
835 return self.check_field_type_for_ffi(cache, field, substs);
837 // All fields are ZSTs; this means that the type should behave
838 // like (), which is FFI-unsafe... except if all fields are PhantomData,
839 // which is tested for below
840 FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_struct_zst, help: None }
843 // We can't completely trust repr(C) markings; make sure the fields are
845 let mut all_phantom = !variant.fields.is_empty();
846 for field in &variant.fields {
847 match self.check_field_type_for_ffi(cache, &field, substs) {
851 FfiPhantom(..) if !def.repr().transparent() && def.is_enum() => {
854 reason: fluent::lint_improper_ctypes_enum_phantomdata,
859 r => return transparent_safety.unwrap_or(r),
863 if all_phantom { FfiPhantom(ty) } else { transparent_safety.unwrap_or(FfiSafe) }
866 /// Checks if the given type is "ffi-safe" (has a stable, well-defined
867 /// representation which can be exported to C code).
868 fn check_type_for_ffi(&self, cache: &mut FxHashSet<Ty<'tcx>>, ty: Ty<'tcx>) -> FfiResult<'tcx> {
871 let tcx = self.cx.tcx;
873 // Protect against infinite recursion, for example
874 // `struct S(*mut S);`.
875 // FIXME: A recursion limit is necessary as well, for irregular
877 if !cache.insert(ty) {
882 ty::Adt(def, substs) => {
883 if def.is_box() && matches!(self.mode, CItemKind::Definition) {
884 if ty.boxed_ty().is_sized(tcx, self.cx.param_env) {
889 reason: fluent::lint_improper_ctypes_box,
894 if def.is_phantom_data() {
895 return FfiPhantom(ty);
897 match def.adt_kind() {
898 AdtKind::Struct | AdtKind::Union => {
899 if !def.repr().c() && !def.repr().transparent() {
902 reason: if def.is_struct() {
903 fluent::lint_improper_ctypes_struct_layout_reason
905 fluent::lint_improper_ctypes_union_layout_reason
907 help: if def.is_struct() {
908 Some(fluent::lint_improper_ctypes_struct_layout_help)
910 Some(fluent::lint_improper_ctypes_union_layout_help)
915 let is_non_exhaustive =
916 def.non_enum_variant().is_field_list_non_exhaustive();
917 if is_non_exhaustive && !def.did().is_local() {
920 reason: if def.is_struct() {
921 fluent::lint_improper_ctypes_struct_non_exhaustive
923 fluent::lint_improper_ctypes_union_non_exhaustive
929 if def.non_enum_variant().fields.is_empty() {
932 reason: if def.is_struct() {
933 fluent::lint_improper_ctypes_struct_fieldless_reason
935 fluent::lint_improper_ctypes_union_fieldless_reason
937 help: if def.is_struct() {
938 Some(fluent::lint_improper_ctypes_struct_fieldless_help)
940 Some(fluent::lint_improper_ctypes_union_fieldless_help)
945 self.check_variant_for_ffi(cache, ty, def, def.non_enum_variant(), substs)
948 if def.variants().is_empty() {
949 // Empty enums are okay... although sort of useless.
953 // Check for a repr() attribute to specify the size of the
955 if !def.repr().c() && !def.repr().transparent() && def.repr().int.is_none()
957 // Special-case types like `Option<extern fn()>`.
958 if repr_nullable_ptr(self.cx, ty, self.mode).is_none() {
961 reason: fluent::lint_improper_ctypes_enum_repr_reason,
962 help: Some(fluent::lint_improper_ctypes_enum_repr_help),
967 if def.is_variant_list_non_exhaustive() && !def.did().is_local() {
970 reason: fluent::lint_improper_ctypes_non_exhaustive,
975 // Check the contained variants.
976 for variant in def.variants() {
977 let is_non_exhaustive = variant.is_field_list_non_exhaustive();
978 if is_non_exhaustive && !variant.def_id.is_local() {
981 reason: fluent::lint_improper_ctypes_non_exhaustive_variant,
986 match self.check_variant_for_ffi(cache, ty, def, variant, substs) {
997 ty::Char => FfiUnsafe {
999 reason: fluent::lint_improper_ctypes_char_reason,
1000 help: Some(fluent::lint_improper_ctypes_char_help),
1003 ty::Int(ty::IntTy::I128) | ty::Uint(ty::UintTy::U128) => {
1004 FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_128bit, help: None }
1007 // Primitive types with a stable representation.
1008 ty::Bool | ty::Int(..) | ty::Uint(..) | ty::Float(..) | ty::Never => FfiSafe,
1010 ty::Slice(_) => FfiUnsafe {
1012 reason: fluent::lint_improper_ctypes_slice_reason,
1013 help: Some(fluent::lint_improper_ctypes_slice_help),
1016 ty::Dynamic(..) => {
1017 FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_dyn, help: None }
1020 ty::Str => FfiUnsafe {
1022 reason: fluent::lint_improper_ctypes_str_reason,
1023 help: Some(fluent::lint_improper_ctypes_str_help),
1026 ty::Tuple(..) => FfiUnsafe {
1028 reason: fluent::lint_improper_ctypes_tuple_reason,
1029 help: Some(fluent::lint_improper_ctypes_tuple_help),
1032 ty::RawPtr(ty::TypeAndMut { ty, .. }) | ty::Ref(_, ty, _)
1034 matches!(self.mode, CItemKind::Definition)
1035 && ty.is_sized(self.cx.tcx, self.cx.param_env)
1041 ty::RawPtr(ty::TypeAndMut { ty, .. })
1042 if match ty.kind() {
1043 ty::Tuple(tuple) => tuple.is_empty(),
1050 ty::RawPtr(ty::TypeAndMut { ty, .. }) | ty::Ref(_, ty, _) => {
1051 self.check_type_for_ffi(cache, ty)
1054 ty::Array(inner_ty, _) => self.check_type_for_ffi(cache, inner_ty),
1057 if self.is_internal_abi(sig.abi()) {
1060 reason: fluent::lint_improper_ctypes_fnptr_reason,
1061 help: Some(fluent::lint_improper_ctypes_fnptr_help),
1065 let sig = tcx.erase_late_bound_regions(sig);
1066 if !sig.output().is_unit() {
1067 let r = self.check_type_for_ffi(cache, sig.output());
1075 for arg in sig.inputs() {
1076 let r = self.check_type_for_ffi(cache, *arg);
1087 ty::Foreign(..) => FfiSafe,
1089 // While opaque types are checked for earlier, if a projection in a struct field
1090 // normalizes to an opaque type, then it will reach this branch.
1091 ty::Alias(ty::Opaque, ..) => {
1092 FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_opaque, help: None }
1095 // `extern "C" fn` functions can have type parameters, which may or may not be FFI-safe,
1096 // so they are currently ignored for the purposes of this lint.
1097 ty::Param(..) | ty::Alias(ty::Projection, ..)
1098 if matches!(self.mode, CItemKind::Definition) =>
1104 | ty::Alias(ty::Projection, ..)
1110 | ty::GeneratorWitness(..)
1111 | ty::GeneratorWitnessMIR(..)
1112 | ty::Placeholder(..)
1113 | ty::FnDef(..) => bug!("unexpected type in foreign function: {:?}", ty),
1117 fn emit_ffi_unsafe_type_lint(
1121 note: DiagnosticMessage,
1122 help: Option<DiagnosticMessage>,
1124 let lint = match self.mode {
1125 CItemKind::Declaration => IMPROPER_CTYPES,
1126 CItemKind::Definition => IMPROPER_CTYPES_DEFINITIONS,
1128 let desc = match self.mode {
1129 CItemKind::Declaration => "block",
1130 CItemKind::Definition => "fn",
1132 let span_note = if let ty::Adt(def, _) = ty.kind()
1133 && let Some(sp) = self.cx.tcx.hir().span_if_local(def.did()) {
1138 self.cx.emit_spanned_lint(
1141 ImproperCTypes { ty, desc, label: sp, help, note, span_note },
1145 fn check_for_opaque_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool {
1146 struct ProhibitOpaqueTypes;
1147 impl<'tcx> ty::visit::TypeVisitor<'tcx> for ProhibitOpaqueTypes {
1148 type BreakTy = Ty<'tcx>;
1150 fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
1151 if !ty.has_opaque_types() {
1152 return ControlFlow::Continue(());
1155 if let ty::Alias(ty::Opaque, ..) = ty.kind() {
1156 ControlFlow::Break(ty)
1158 ty.super_visit_with(self)
1163 if let Some(ty) = self
1166 .normalize_erasing_regions(self.cx.param_env, ty)
1167 .visit_with(&mut ProhibitOpaqueTypes)
1170 self.emit_ffi_unsafe_type_lint(ty, sp, fluent::lint_improper_ctypes_opaque, None);
1177 fn check_type_for_ffi_and_report_errors(
1182 is_return_type: bool,
1184 // We have to check for opaque types before `normalize_erasing_regions`,
1185 // which will replace opaque types with their underlying concrete type.
1186 if self.check_for_opaque_ty(sp, ty) {
1187 // We've already emitted an error due to an opaque type.
1191 // it is only OK to use this function because extern fns cannot have
1192 // any generic types right now:
1193 let ty = self.cx.tcx.normalize_erasing_regions(self.cx.param_env, ty);
1195 // C doesn't really support passing arrays by value - the only way to pass an array by value
1196 // is through a struct. So, first test that the top level isn't an array, and then
1197 // recursively check the types inside.
1198 if !is_static && self.check_for_array_ty(sp, ty) {
1202 // Don't report FFI errors for unit return types. This check exists here, and not in
1203 // `check_foreign_fn` (where it would make more sense) so that normalization has definitely
1205 if is_return_type && ty.is_unit() {
1209 match self.check_type_for_ffi(&mut FxHashSet::default(), ty) {
1210 FfiResult::FfiSafe => {}
1211 FfiResult::FfiPhantom(ty) => {
1212 self.emit_ffi_unsafe_type_lint(
1215 fluent::lint_improper_ctypes_only_phantomdata,
1219 // If `ty` is a `repr(transparent)` newtype, and the non-zero-sized type is a generic
1220 // argument, which after substitution, is `()`, then this branch can be hit.
1221 FfiResult::FfiUnsafe { ty, .. } if is_return_type && ty.is_unit() => {}
1222 FfiResult::FfiUnsafe { ty, reason, help } => {
1223 self.emit_ffi_unsafe_type_lint(ty, sp, reason, help);
1228 fn check_foreign_fn(&mut self, def_id: LocalDefId, decl: &hir::FnDecl<'_>) {
1229 let sig = self.cx.tcx.fn_sig(def_id).subst_identity();
1230 let sig = self.cx.tcx.erase_late_bound_regions(sig);
1232 for (input_ty, input_hir) in iter::zip(sig.inputs(), decl.inputs) {
1233 self.check_type_for_ffi_and_report_errors(input_hir.span, *input_ty, false, false);
1236 if let hir::FnRetTy::Return(ref ret_hir) = decl.output {
1237 let ret_ty = sig.output();
1238 self.check_type_for_ffi_and_report_errors(ret_hir.span, ret_ty, false, true);
1242 fn check_foreign_static(&mut self, id: hir::OwnerId, span: Span) {
1243 let ty = self.cx.tcx.type_of(id);
1244 self.check_type_for_ffi_and_report_errors(span, ty, true, false);
1247 fn is_internal_abi(&self, abi: SpecAbi) -> bool {
1250 SpecAbi::Rust | SpecAbi::RustCall | SpecAbi::RustIntrinsic | SpecAbi::PlatformIntrinsic
1255 impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDeclarations {
1256 fn check_foreign_item(&mut self, cx: &LateContext<'_>, it: &hir::ForeignItem<'_>) {
1257 let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Declaration };
1258 let abi = cx.tcx.hir().get_foreign_abi(it.hir_id());
1260 if !vis.is_internal_abi(abi) {
1262 hir::ForeignItemKind::Fn(ref decl, _, _) => {
1263 vis.check_foreign_fn(it.owner_id.def_id, decl);
1265 hir::ForeignItemKind::Static(ref ty, _) => {
1266 vis.check_foreign_static(it.owner_id, ty.span);
1268 hir::ForeignItemKind::Type => (),
1274 impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDefinitions {
1277 cx: &LateContext<'tcx>,
1278 kind: hir::intravisit::FnKind<'tcx>,
1279 decl: &'tcx hir::FnDecl<'_>,
1280 _: &'tcx hir::Body<'_>,
1284 use hir::intravisit::FnKind;
1286 let abi = match kind {
1287 FnKind::ItemFn(_, _, header, ..) => header.abi,
1288 FnKind::Method(_, sig, ..) => sig.header.abi,
1292 let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Definition };
1293 if !vis.is_internal_abi(abi) {
1294 vis.check_foreign_fn(id, decl);
1299 declare_lint_pass!(VariantSizeDifferences => [VARIANT_SIZE_DIFFERENCES]);
// Lint pass for `variant_size_differences`: warns when one enum variant is
// drastically larger than the rest, since every value of the enum pays for
// the largest variant's size.
1301 impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences {
1302 fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
1303 if let hir::ItemKind::Enum(ref enum_definition, _) = it.kind {
1304 let t = cx.tcx.type_of(it.owner_id);
// Layout computation does not care about regions; erase them first.
1305 let ty = cx.tcx.erase_regions(t);
// Bail out silently if layout cannot be computed (e.g. generic enum).
1306 let Ok(layout) = cx.layout_of(ty) else { return };
// Only multi-variant enums with a directly-encoded tag are measured;
// niche-encoded or single-variant layouts are skipped.
1307 let Variants::Multiple {
1308 tag_encoding: TagEncoding::Direct, tag, ref variants, ..
1309 } = &layout.variants else {
// (The `return` of the `else` arm is elided in this excerpt.)
1313 let tag_size = tag.size(&cx.tcx).bytes();
// (Part of a `debug!` invocation; the surrounding lines are elided.)
1316 "enum `{}` is {} bytes large with layout:\n{:#?}",
1318 layout.size.bytes(),
// Pair each HIR variant with its computed layout to find the largest
// and second-largest payload sizes (and the index of the largest).
1322 let (largest, slargest, largest_index) = iter::zip(enum_definition.variants, variants)
1323 .map(|(variant, variant_layout)| {
1324 // Subtract the size of the enum tag.
1325 let bytes = variant_layout.size.bytes().saturating_sub(tag_size);
1327 debug!("- variant `{}` is {} bytes large", variant.ident, bytes);
// (An `.enumerate()`-style adapter feeding `(idx, size)` pairs into the
// fold below is elided in this excerpt.)
// Fold keeps (largest, second-largest, index-of-largest).
1331 .fold((0, 0, 0), |(l, s, li), (idx, size)| {
// (The `if size > l` arm promoting the previous largest is elided.)
1334 } else if size > s {
1341 // We only warn if the largest variant is at least thrice as large as
1342 // the second-largest.
1343 if largest > slargest * 3 && slargest > 0 {
1344 cx.emit_spanned_lint(
1345 VARIANT_SIZE_DIFFERENCES,
// Point the diagnostic at the oversized variant itself.
1346 enum_definition.variants[largest_index].span,
1347 VariantSizeDifferencesDiag { largest },
// Lint declaration for `invalid_atomic_ordering` (the `declare_lint!` opener
// and the lint-level line are elided in this excerpt).
1356 /// The `invalid_atomic_ordering` lint detects passing an `Ordering`
1357 /// to an atomic operation that does not support that ordering.
1361 /// ```rust,compile_fail
1362 /// # use core::sync::atomic::{AtomicU8, Ordering};
1363 /// let atom = AtomicU8::new(0);
1364 /// let value = atom.load(Ordering::Release);
1365 /// # let _ = value;
1372 /// Some atomic operations are only supported for a subset of the
1373 /// `atomic::Ordering` variants. Passing an unsupported variant will cause
1374 /// an unconditional panic at runtime, which is detected by this lint.
1376 /// This lint will trigger in the following cases: (where `AtomicType` is an
1377 /// atomic type from `core::sync::atomic`, such as `AtomicBool`,
1378 /// `AtomicPtr`, `AtomicUsize`, or any of the other integer atomics).
1380 /// - Passing `Ordering::Acquire` or `Ordering::AcqRel` to
1381 /// `AtomicType::store`.
1383 /// - Passing `Ordering::Release` or `Ordering::AcqRel` to
1384 /// `AtomicType::load`.
1386 /// - Passing `Ordering::Relaxed` to `core::sync::atomic::fence` or
1387 /// `core::sync::atomic::compiler_fence`.
1389 /// - Passing `Ordering::Release` or `Ordering::AcqRel` as the failure
1390 /// ordering for any of `AtomicType::compare_exchange`,
1391 /// `AtomicType::compare_exchange_weak`, or `AtomicType::fetch_update`.
1392 INVALID_ATOMIC_ORDERING,
1394 "usage of invalid atomic ordering in atomic operations and memory fences"
// Register the pass struct and tie it to the lint it emits.
1397 declare_lint_pass!(InvalidAtomicOrdering => [INVALID_ATOMIC_ORDERING]);
1399 impl InvalidAtomicOrdering {
// If `expr` is an inherent method call on one of the standard atomic types,
// return the method name and its argument list; otherwise `None`.
// `recognized_names` lets callers bail out early before any type lookups.
1400 fn inherent_atomic_method_call<'hir>(
1401 cx: &LateContext<'_>,
// (The `expr: &'hir Expr<'hir>` parameter line is elided in this excerpt.)
1403 recognized_names: &[Symbol], // used for fast path calculation
1404 ) -> Option<(Symbol, &'hir [Expr<'hir>])> {
// The allow-list of `core::sync::atomic` type names; its entries are
// elided in this excerpt.
1405 const ATOMIC_TYPES: &[Symbol] = &[
// Cheap name check first, then resolve the method to its impl and make
// sure that impl is an inherent impl on an atomic type from the standard
// library's `atomic` module.
1421 if let ExprKind::MethodCall(ref method_path, _, args, _) = &expr.kind
1422 && recognized_names.contains(&method_path.ident.name)
1423 && let Some(m_def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id)
1424 && let Some(impl_did) = cx.tcx.impl_of_method(m_def_id)
1425 && let Some(adt) = cx.tcx.type_of(impl_did).ty_adt_def()
1426 // skip extension traits, only lint functions from the standard library
1427 && cx.tcx.trait_id_of_impl(impl_did).is_none()
1428 && let parent = cx.tcx.parent(adt.did())
1429 && cx.tcx.is_diagnostic_item(sym::atomic_mod, parent)
1430 && ATOMIC_TYPES.contains(&cx.tcx.item_name(adt.did()))
1432 return Some((method_path.ident.name, args));
// Resolve an argument expression to one of the five `atomic::Ordering`
// variant names, or `None` if it is not a literal `Ordering` path.
1437 fn match_ordering(cx: &LateContext<'_>, ord_arg: &Expr<'_>) -> Option<Symbol> {
1438 let ExprKind::Path(ref ord_qpath) = ord_arg.kind else { return None };
1439 let did = cx.qpath_res(ord_qpath, ord_arg.hir_id).opt_def_id()?;
// (`let tcx = cx.tcx;` is elided in this excerpt; `tcx` is used below.)
1441 let atomic_ordering = tcx.get_diagnostic_item(sym::Ordering);
1442 let name = tcx.item_name(did);
1443 let parent = tcx.parent(did);
// Match the resolved item's name against the known variants, and verify it
// really lives inside the `Ordering` enum (one extra `parent` hop is needed
// when the path resolved to a variant's constructor rather than the variant).
1444 [sym::Relaxed, sym::Release, sym::Acquire, sym::AcqRel, sym::SeqCst].into_iter().find(
// (The closure header `|&ordering| name == ordering` is elided here.)
1447 && (Some(parent) == atomic_ordering
1448 // needed in case this is a ctor, not a variant
1449 || tcx.opt_parent(parent) == atomic_ordering)
// Lint `AtomicType::load` called with `Release`/`AcqRel` and
// `AtomicType::store` called with `Acquire`/`AcqRel` — both panic at runtime.
1454 fn check_atomic_load_store(cx: &LateContext<'_>, expr: &Expr<'_>) {
1455 if let Some((method, args)) = Self::inherent_atomic_method_call(cx, expr, &[sym::load, sym::store])
// `load(order)` takes the ordering as its first argument; `store(val, order)`
// takes it as its second. Each method has exactly one forbidden one-way
// ordering, plus `AcqRel` which is invalid for both.
1456 && let Some((ordering_arg, invalid_ordering)) = match method {
1457 sym::load => Some((&args[0], sym::Release)),
1458 sym::store => Some((&args[1], sym::Acquire)),
// (The `_ => None` fallback arm is elided in this excerpt.)
1461 && let Some(ordering) = Self::match_ordering(cx, ordering_arg)
1462 && (ordering == invalid_ordering || ordering == sym::AcqRel)
// Emit the method-specific diagnostic, pointing at the ordering argument.
1464 if method == sym::load {
1465 cx.emit_spanned_lint(INVALID_ATOMIC_ORDERING, ordering_arg.span, AtomicOrderingLoad);
// (The `} else {` between these two emissions is elided in this excerpt.)
1467 cx.emit_spanned_lint(INVALID_ATOMIC_ORDERING, ordering_arg.span, AtomicOrderingStore);
// Lint `core::sync::atomic::fence` / `compiler_fence` called with
// `Ordering::Relaxed`, which panics at runtime (a relaxed fence is a no-op
// and is rejected by the standard library).
1472 fn check_memory_fence(cx: &LateContext<'_>, expr: &Expr<'_>) {
1473 if let ExprKind::Call(ref func, ref args) = expr.kind
1474 && let ExprKind::Path(ref func_qpath) = func.kind
1475 && let Some(def_id) = cx.qpath_res(func_qpath, func.hir_id).opt_def_id()
// Identify the two fence functions via their diagnostic items, so aliases
// and re-exports are handled uniformly.
1476 && matches!(cx.tcx.get_diagnostic_name(def_id), Some(sym::fence | sym::compiler_fence))
1477 && Self::match_ordering(cx, &args[0]) == Some(sym::Relaxed)
1479 cx.emit_spanned_lint(INVALID_ATOMIC_ORDERING, args[0].span, AtomicOrderingFence);
// Lint the *failure* ordering of `compare_exchange`, `compare_exchange_weak`,
// and `fetch_update`: it may not be `Release` or `AcqRel` (a failed CAS
// performs no store, so there is nothing to release).
1483 fn check_atomic_compare_exchange(cx: &LateContext<'_>, expr: &Expr<'_>) {
1484 let Some((method, args)) = Self::inherent_atomic_method_call(cx, expr, &[sym::fetch_update, sym::compare_exchange, sym::compare_exchange_weak])
// (The `else { return };` of this `let-else` is elided in this excerpt.)
// `fetch_update(set_order, fetch_order, f)` has the failure ordering at
// index 1; `compare_exchange{,_weak}(cur, new, success, failure)` at index 3.
1487 let fail_order_arg = match method {
1488 sym::fetch_update => &args[1],
1489 sym::compare_exchange | sym::compare_exchange_weak => &args[3],
// (The unreachable fallback arm of this match is elided in this excerpt.)
1493 let Some(fail_ordering) = Self::match_ordering(cx, fail_order_arg) else { return };
1495 if matches!(fail_ordering, sym::Release | sym::AcqRel) {
1496 cx.emit_spanned_lint(
1497 INVALID_ATOMIC_ORDERING,
1498 fail_order_arg.span,
// The diagnostic names the offending method and points at the argument.
1499 InvalidAtomicOrderingDiag { method, fail_order_arg_span: fail_order_arg.span },
// Entry point: run all three checks on every expression. Each check is
// self-gating (cheap pattern/name tests first), so the fan-out is inexpensive.
1505 impl<'tcx> LateLintPass<'tcx> for InvalidAtomicOrdering {
1506 fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
1507 Self::check_atomic_load_store(cx, expr);
1508 Self::check_memory_fence(cx, expr);
1509 Self::check_atomic_compare_exchange(cx, expr);