// src/tools/clippy/clippy_lints/src/atomic_ordering.rs
use clippy_utils::diagnostics::span_lint_and_help;
use clippy_utils::match_def_path;
use if_chain::if_chain;
use rustc_hir::def_id::DefId;
use rustc_hir::{Expr, ExprKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::ty;
use rustc_session::{declare_lint_pass, declare_tool_lint};

declare_clippy_lint! {
    /// **What it does:** Checks for usage of invalid atomic
    /// ordering in atomic loads/stores/exchanges/updates and
    /// memory fences.
    ///
    /// **Why is this bad?** Using an invalid atomic ordering
    /// will cause a panic at run-time.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust,no_run
    /// # use std::sync::atomic::{self, AtomicU8, Ordering};
    ///
    /// let x = AtomicU8::new(0);
    ///
    /// // Bad: `Release` and `AcqRel` cannot be used for `load`.
    /// let _ = x.load(Ordering::Release);
    /// let _ = x.load(Ordering::AcqRel);
    ///
    /// // Bad: `Acquire` and `AcqRel` cannot be used for `store`.
    /// x.store(1, Ordering::Acquire);
    /// x.store(2, Ordering::AcqRel);
    ///
    /// // Bad: `Relaxed` cannot be used as a fence's ordering.
    /// atomic::fence(Ordering::Relaxed);
    /// atomic::compiler_fence(Ordering::Relaxed);
    ///
    /// // Bad: `Release` and `AcqRel` are both always invalid
    /// // for the failure ordering (the last arg).
    /// let _ = x.compare_exchange(1, 2, Ordering::SeqCst, Ordering::Release);
    /// let _ = x.compare_exchange_weak(2, 3, Ordering::AcqRel, Ordering::AcqRel);
    ///
    /// // Bad: The failure ordering is not allowed to be
    /// // stronger than the success order, and `SeqCst` is
    /// // stronger than `Relaxed`.
    /// let _ = x.fetch_update(Ordering::Relaxed, Ordering::SeqCst, |val| Some(val + val));
    /// ```
    pub INVALID_ATOMIC_ORDERING,
    correctness,
    "usage of invalid atomic ordering in atomic operations and memory fences"
}

declare_lint_pass!(AtomicOrdering => [INVALID_ATOMIC_ORDERING]);

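// The atomic types in `core::sync::atomic` whose method calls this lint inspects.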
const ATOMIC_TYPES: [&str; 12] = [
    "AtomicBool",
    "AtomicI8",
    "AtomicI16",
    "AtomicI32",
    "AtomicI64",
    "AtomicIsize",
    "AtomicPtr",
    "AtomicU8",
    "AtomicU16",
    "AtomicU32",
    "AtomicU64",
    "AtomicUsize",
];

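/// Returns `true` if the expression's type is one of the `core::sync::atomic` types
/// listed in `ATOMIC_TYPES`.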
fn type_is_atomic(cx: &LateContext<'_>, expr: &Expr<'_>) -> bool {
    if let ty::Adt(&ty::AdtDef { did, .. }, _) = cx.typeck_results().expr_ty(expr).kind() {
        ATOMIC_TYPES
            .iter()
            .any(|ty| match_def_path(cx, did, &["core", "sync", "atomic", ty]))
    } else {
        false
    }
}

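/// Returns `true` if `did` resolves to any of the given `core::sync::atomic::Ordering`
/// variants.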
fn match_ordering_def_path(cx: &LateContext<'_>, did: DefId, orderings: &[&str]) -> bool {
    orderings
        .iter()
        .any(|ordering| match_def_path(cx, did, &["core", "sync", "atomic", "Ordering", ordering]))
}

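/// Lints `load`/`store` calls on atomic types whose ordering argument would panic at
/// run-time: `Release`/`AcqRel` for loads, `Acquire`/`AcqRel` for stores.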
fn check_atomic_load_store(cx: &LateContext<'_>, expr: &Expr<'_>) {
    if_chain! {
        if let ExprKind::MethodCall(method_path, _, args, _) = &expr.kind;
        let method = method_path.ident.name.as_str();
        if type_is_atomic(cx, &args[0]);
        if method == "load" || method == "store";
        let ordering_arg = if method == "load" { &args[1] } else { &args[2] };
        if let ExprKind::Path(ref ordering_qpath) = ordering_arg.kind;
        if let Some(ordering_def_id) = cx.qpath_res(ordering_qpath, ordering_arg.hir_id).opt_def_id();
        then {
            if method == "load" &&
                match_ordering_def_path(cx, ordering_def_id, &["Release", "AcqRel"]) {
                span_lint_and_help(
                    cx,
                    INVALID_ATOMIC_ORDERING,
                    ordering_arg.span,
                    "atomic loads cannot have `Release` and `AcqRel` ordering",
                    None,
                    "consider using ordering modes `Acquire`, `SeqCst` or `Relaxed`"
                );
            } else if method == "store" &&
                match_ordering_def_path(cx, ordering_def_id, &["Acquire", "AcqRel"]) {
                span_lint_and_help(
                    cx,
                    INVALID_ATOMIC_ORDERING,
                    ordering_arg.span,
                    "atomic stores cannot have `Acquire` and `AcqRel` ordering",
                    None,
                    "consider using ordering modes `Release`, `SeqCst` or `Relaxed`"
                );
            }
        }
    }
}

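/// Lints `atomic::fence` and `atomic::compiler_fence` calls that pass `Relaxed`,
/// which is not a valid fence ordering.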
fn check_memory_fence(cx: &LateContext<'_>, expr: &Expr<'_>) {
    if_chain! {
        if let ExprKind::Call(func, args) = expr.kind;
        if let ExprKind::Path(ref func_qpath) = func.kind;
        if let Some(def_id) = cx.qpath_res(func_qpath, func.hir_id).opt_def_id();
        if ["fence", "compiler_fence"]
            .iter()
            .any(|func| match_def_path(cx, def_id, &["core", "sync", "atomic", func]));
        if let ExprKind::Path(ref ordering_qpath) = &args[0].kind;
        if let Some(ordering_def_id) = cx.qpath_res(ordering_qpath, args[0].hir_id).opt_def_id();
        if match_ordering_def_path(cx, ordering_def_id, &["Relaxed"]);
        then {
            span_lint_and_help(
                cx,
                INVALID_ATOMIC_ORDERING,
                args[0].span,
                "memory fences cannot have `Relaxed` ordering",
                None,
                "consider using ordering modes `Acquire`, `Release`, `AcqRel` or `SeqCst`"
            );
        }
    }
}

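/// Resolves an ordering argument to the `DefId` of the `Ordering` variant it names,
/// if the argument is a plain path expression.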
fn opt_ordering_defid(cx: &LateContext<'_>, ord_arg: &Expr<'_>) -> Option<DefId> {
    if let ExprKind::Path(ref ord_qpath) = ord_arg.kind {
        cx.qpath_res(ord_qpath, ord_arg.hir_id).opt_def_id()
    } else {
        None
    }
}

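/// Lints `compare_exchange`, `compare_exchange_weak` and `fetch_update` calls whose
/// failure ordering is `Release`/`AcqRel`, or stronger than the success ordering.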
fn check_atomic_compare_exchange(cx: &LateContext<'_>, expr: &Expr<'_>) {
    if_chain! {
        if let ExprKind::MethodCall(method_path, _, args, _) = &expr.kind;
        let method = method_path.ident.name.as_str();
        if type_is_atomic(cx, &args[0]);
        if method == "compare_exchange" || method == "compare_exchange_weak" || method == "fetch_update";
        let (success_order_arg, failure_order_arg) = if method == "fetch_update" {
            (&args[1], &args[2])
        } else {
            (&args[3], &args[4])
        };
        if let Some(fail_ordering_def_id) = opt_ordering_defid(cx, failure_order_arg);
        then {
            // Helper type holding the per-ordering checking and error reporting data:
            // (success ordering name,
            //  failure orderings forbidden by that success ordering,
            //  suggestion message).
            type OrdLintInfo = (&'static str, &'static [&'static str], &'static str);
            let relaxed: OrdLintInfo = ("Relaxed", &["SeqCst", "Acquire"], "ordering mode `Relaxed`");
            let acquire: OrdLintInfo = ("Acquire", &["SeqCst"], "ordering modes `Acquire` or `Relaxed`");
            let seq_cst: OrdLintInfo = ("SeqCst", &[], "ordering modes `Acquire`, `SeqCst` or `Relaxed`");
            let release = ("Release", relaxed.1, relaxed.2);
            let acqrel = ("AcqRel", acquire.1, acquire.2);
            let search = [relaxed, acquire, seq_cst, release, acqrel];

            let success_lint_info = opt_ordering_defid(cx, success_order_arg)
                .and_then(|success_ord_def_id| -> Option<OrdLintInfo> {
                    search
                        .iter()
                        .find(|(ordering, ..)| {
                            match_def_path(cx, success_ord_def_id,
                                &["core", "sync", "atomic", "Ordering", ordering])
                        })
                        .copied()
                });

            if match_ordering_def_path(cx, fail_ordering_def_id, &["Release", "AcqRel"]) {
                // If we don't know what the success ordering is, use what we'd
                // suggest if it were maximally permissive.
                let suggested = success_lint_info.unwrap_or(seq_cst).2;
                span_lint_and_help(
                    cx,
                    INVALID_ATOMIC_ORDERING,
                    failure_order_arg.span,
                    &format!(
                        "{}'s failure ordering may not be `Release` or `AcqRel`",
                        method,
                    ),
                    None,
                    &format!("consider using {} instead", suggested),
                );
            } else if let Some((success_ord_name, bad_ords_given_success, suggested)) = success_lint_info {
                if match_ordering_def_path(cx, fail_ordering_def_id, bad_ords_given_success) {
                    span_lint_and_help(
                        cx,
                        INVALID_ATOMIC_ORDERING,
                        failure_order_arg.span,
                        &format!(
                            "{}'s failure ordering may not be stronger than the success ordering of `{}`",
                            method,
                            success_ord_name,
                        ),
                        None,
                        &format!("consider using {} instead", suggested),
                    );
                }
            }
        }
    }
}

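// Run all three checks on every expression visited by the late lint pass.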
impl<'tcx> LateLintPass<'tcx> for AtomicOrdering {
    fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
        check_atomic_load_store(cx, expr);
        check_memory_fence(cx, expr);
        check_atomic_compare_exchange(cx, expr);
    }
}