-use crate::utils::span_lint;
+use crate::utils::{match_type, paths, return_ty, span_lint};
use itertools::Itertools;
use pulldown_cmark;
-use rustc::lint::{EarlyContext, EarlyLintPass, LintArray, LintPass};
-use rustc::{declare_tool_lint, lint_array};
+use rustc::hir;
+use rustc::impl_lint_pass;
+use rustc::lint::{LateContext, LateLintPass, LintArray, LintPass};
use rustc_data_structures::fx::FxHashSet;
-use syntax::ast;
-use syntax::source_map::{BytePos, Span};
+use rustc_session::declare_tool_lint;
+use std::ops::Range;
+use syntax::ast::{AttrKind, Attribute};
+use syntax::source_map::{BytePos, MultiSpan, Span};
use syntax_pos::Pos;
use url::Url;
/// /// Do something with the foo_bar parameter. See also
/// /// that::other::module::foo.
/// // ^ `foo_bar` and `that::other::module::foo` should be ticked.
- /// fn doit(foo_bar) { .. }
+ /// fn doit(foo_bar: usize) {}
/// ```
pub DOC_MARKDOWN,
pedantic,
"presence of `_`, `::` or camel-case outside backticks in documentation"
}
+declare_clippy_lint! {
+ /// **What it does:** Checks for the doc comments of publicly visible
+ /// unsafe functions and warns if there is no `# Safety` section.
+ ///
+ /// **Why is this bad?** Unsafe functions should document their safety
+ /// preconditions, so that users can be sure they are using them safely.
+ ///
+ /// **Known problems:** None.
+ ///
+ /// **Examples:**
+ /// ```rust
+ ///# type Universe = ();
+ /// /// This function should really be documented
+ /// pub unsafe fn start_apocalypse(u: &mut Universe) {
+ /// unimplemented!();
+ /// }
+ /// ```
+ ///
+ /// At least write a line about safety:
+ ///
+ /// ```rust
+ ///# type Universe = ();
+ /// /// # Safety
+ /// ///
+ /// /// This function should not be called before the horsemen are ready.
+ /// pub unsafe fn start_apocalypse(u: &mut Universe) {
+ /// unimplemented!();
+ /// }
+ /// ```
+ pub MISSING_SAFETY_DOC,
+ style,
+ "`pub unsafe fn` without `# Safety` docs"
+}
+
+declare_clippy_lint! {
+ /// **What it does:** Checks the doc comments of publicly visible functions that
+ /// return a `Result` type and warns if there is no `# Errors` section.
+ ///
+ /// **Why is this bad?** Documenting the type of errors that can be returned from a
+ /// function can help callers write code to handle the errors appropriately.
+ ///
+ /// **Known problems:** None.
+ ///
+ /// **Examples:**
+ ///
+ /// Since the following function returns a `Result` it has an `# Errors` section in
+ /// its doc comment:
+ ///
+ /// ```rust
+ ///# use std::io;
+ /// /// # Errors
+ /// ///
+ /// /// Will return `Err` if `filename` does not exist or the user does not have
+ /// /// permission to read it.
+ /// pub fn read(filename: String) -> io::Result<String> {
+ /// unimplemented!();
+ /// }
+ /// ```
+ pub MISSING_ERRORS_DOC,
+ pedantic,
+ "`pub fn` returns `Result` without `# Errors` in doc comment"
+}
+
+declare_clippy_lint! {
+ /// **What it does:** Checks for `fn main() { .. }` in doctests
+ ///
+ /// **Why is this bad?** The test can be shorter (and likely more readable)
+ /// if the `fn main()` is left implicit.
+ ///
+ /// **Known problems:** None.
+ ///
+ /// **Examples:**
+ /// ``````rust
+ /// /// An example of a doctest with a `main()` function
+ /// ///
+ /// /// # Examples
+ /// ///
+ /// /// ```
+ /// /// fn main() {
+ /// /// // this needs not be in an `fn`
+ /// /// }
+ /// /// ```
+ /// fn needless_main() {
+ /// unimplemented!();
+ /// }
+ /// ``````
+ pub NEEDLESS_DOCTEST_MAIN,
+ style,
+ "presence of `fn main() {` in code examples"
+}
+
+#[allow(clippy::module_name_repetitions)]
#[derive(Clone)]
-pub struct Doc {
+pub struct DocMarkdown {
valid_idents: FxHashSet<String>,
+ in_trait_impl: bool,
}
-impl Doc {
+impl DocMarkdown {
pub fn new(valid_idents: FxHashSet<String>) -> Self {
- Self { valid_idents }
+ Self {
+ valid_idents,
+ in_trait_impl: false,
+ }
}
}
-impl LintPass for Doc {
- fn get_lints(&self) -> LintArray {
- lint_array![DOC_MARKDOWN]
- }
+impl_lint_pass!(DocMarkdown => [DOC_MARKDOWN, MISSING_SAFETY_DOC, MISSING_ERRORS_DOC, NEEDLESS_DOCTEST_MAIN]);
- fn name(&self) -> &'static str {
- "DocMarkdown"
+impl<'a, 'tcx> LateLintPass<'a, 'tcx> for DocMarkdown {
+ fn check_crate(&mut self, cx: &LateContext<'a, 'tcx>, krate: &'tcx hir::Crate) {
+ check_attrs(cx, &self.valid_idents, &krate.attrs);
}
-}
-impl EarlyLintPass for Doc {
- fn check_crate(&mut self, cx: &EarlyContext<'_>, krate: &ast::Crate) {
- check_attrs(cx, &self.valid_idents, &krate.attrs);
+ fn check_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx hir::Item) {
+ let headers = check_attrs(cx, &self.valid_idents, &item.attrs);
+ match item.kind {
+ hir::ItemKind::Fn(ref sig, ..) => {
+ lint_for_missing_headers(cx, item.hir_id, item.span, sig, headers);
+ },
+ hir::ItemKind::Impl(_, _, _, _, ref trait_ref, ..) => {
+ self.in_trait_impl = trait_ref.is_some();
+ },
+ _ => {},
+ }
}
- fn check_item(&mut self, cx: &EarlyContext<'_>, item: &ast::Item) {
- check_attrs(cx, &self.valid_idents, &item.attrs);
+ fn check_item_post(&mut self, _cx: &LateContext<'a, 'tcx>, item: &'tcx hir::Item) {
+ if let hir::ItemKind::Impl(..) = item.kind {
+ self.in_trait_impl = false;
+ }
}
-}
-struct Parser<'a> {
- parser: pulldown_cmark::Parser<'a>,
-}
+ fn check_trait_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx hir::TraitItem) {
+ let headers = check_attrs(cx, &self.valid_idents, &item.attrs);
+ if let hir::TraitItemKind::Method(ref sig, ..) = item.kind {
+ lint_for_missing_headers(cx, item.hir_id, item.span, sig, headers);
+ }
+ }
-impl<'a> Parser<'a> {
- fn new(parser: pulldown_cmark::Parser<'a>) -> Self {
- Self { parser }
+ fn check_impl_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx hir::ImplItem) {
+ let headers = check_attrs(cx, &self.valid_idents, &item.attrs);
+ if self.in_trait_impl {
+ return;
+ }
+ if let hir::ImplItemKind::Method(ref sig, ..) = item.kind {
+ lint_for_missing_headers(cx, item.hir_id, item.span, sig, headers);
+ }
}
}
-impl<'a> Iterator for Parser<'a> {
- type Item = (usize, pulldown_cmark::Event<'a>);
-
- fn next(&mut self) -> Option<Self::Item> {
- let offset = self.parser.get_offset();
- self.parser.next().map(|event| (offset, event))
+fn lint_for_missing_headers<'a, 'tcx>(
+ cx: &LateContext<'a, 'tcx>,
+ hir_id: hir::HirId,
+ span: impl Into<MultiSpan> + Copy,
+ sig: &hir::FnSig,
+ headers: DocHeaders,
+) {
+ if !cx.access_levels.is_exported(hir_id) {
+ return; // Private functions do not require doc comments
+ }
+ if !headers.safety && sig.header.unsafety == hir::Unsafety::Unsafe {
+ span_lint(
+ cx,
+ MISSING_SAFETY_DOC,
+ span,
+ "unsafe function's docs miss `# Safety` section",
+ );
+ }
+ if !headers.errors && match_type(cx, return_ty(cx, hir_id), &paths::RESULT) {
+ span_lint(
+ cx,
+ MISSING_ERRORS_DOC,
+ span,
+ "docs for function returning `Result` missing `# Errors` section",
+ );
}
}
/// need to keep track of
/// the spans but this function is inspired from the later.
#[allow(clippy::cast_possible_truncation)]
+#[must_use]
pub fn strip_doc_comment_decoration(comment: &str, span: Span) -> (String, Vec<(usize, Span)>) {
// one-line comments lose their prefix
const ONELINERS: &[&str] = &["///!", "///", "//!", "//"];
panic!("not a doc-comment: {}", comment);
}
-pub fn check_attrs<'a>(cx: &EarlyContext<'_>, valid_idents: &FxHashSet<String>, attrs: &'a [ast::Attribute]) {
+#[derive(Copy, Clone)]
+struct DocHeaders {
+ safety: bool,
+ errors: bool,
+}
+
+fn check_attrs<'a>(cx: &LateContext<'_, '_>, valid_idents: &FxHashSet<String>, attrs: &'a [Attribute]) -> DocHeaders {
let mut doc = String::new();
let mut spans = vec![];
for attr in attrs {
- if attr.is_sugared_doc {
- if let Some(ref current) = attr.value_str() {
- let current = current.to_string();
-                let (current, current_spans) = strip_doc_comment_decoration(&current, attr.span);
-                spans.extend_from_slice(&current_spans);
-                doc.push_str(&current);
- }
- } else if attr.name() == "doc" {
+ if let AttrKind::DocComment(ref comment) = attr.kind {
+ let comment = comment.to_string();
+ let (comment, current_spans) = strip_doc_comment_decoration(&comment, attr.span);
+            spans.extend_from_slice(&current_spans);
+ doc.push_str(&comment);
+ } else if attr.check_name(sym!(doc)) {
// ignore mix of sugared and non-sugared doc
- return;
+ // don't trigger the safety or errors check
+ return DocHeaders {
+ safety: true,
+ errors: true,
+ };
}
}
current += offset_copy;
}
- if !doc.is_empty() {
- let parser = Parser::new(pulldown_cmark::Parser::new(&doc));
- let parser = parser.coalesce(|x, y| {
- use pulldown_cmark::Event::*;
-
- let x_offset = x.0;
- let y_offset = y.0;
-
- match (x.1, y.1) {
- (Text(x), Text(y)) => {
- let mut x = x.into_owned();
- x.push_str(&y);
- Ok((x_offset, Text(x.into())))
- },
- (x, y) => Err(((x_offset, x), (y_offset, y))),
- }
- });
- check_doc(cx, valid_idents, parser, &spans);
+ if doc.is_empty() {
+ return DocHeaders {
+ safety: false,
+ errors: false,
+ };
}
+
+ let parser = pulldown_cmark::Parser::new(&doc).into_offset_iter();
+ // Iterate over all `Events` and combine consecutive events into one
+ let events = parser.coalesce(|previous, current| {
+ use pulldown_cmark::Event::*;
+
+ let previous_range = previous.1;
+ let current_range = current.1;
+
+ match (previous.0, current.0) {
+ (Text(previous), Text(current)) => {
+ let mut previous = previous.to_string();
+                previous.push_str(&current);
+ Ok((Text(previous.into()), previous_range))
+ },
+ (previous, current) => Err(((previous, previous_range), (current, current_range))),
+ }
+ });
+ check_doc(cx, valid_idents, events, &spans)
}
-fn check_doc<'a, Events: Iterator<Item = (usize, pulldown_cmark::Event<'a>)>>(
- cx: &EarlyContext<'_>,
+fn check_doc<'a, Events: Iterator<Item = (pulldown_cmark::Event<'a>, Range<usize>)>>(
+ cx: &LateContext<'_, '_>,
valid_idents: &FxHashSet<String>,
- docs: Events,
+ events: Events,
spans: &[(usize, Span)],
-) {
+) -> DocHeaders {
+ // true if a safety header was found
use pulldown_cmark::Event::*;
use pulldown_cmark::Tag::*;
+ let mut headers = DocHeaders {
+ safety: false,
+ errors: false,
+ };
let mut in_code = false;
let mut in_link = None;
+ let mut in_heading = false;
- for (offset, event) in docs {
+ for (event, range) in events {
match event {
- Start(CodeBlock(_)) | Start(Code) => in_code = true,
- End(CodeBlock(_)) | End(Code) => in_code = false,
- Start(Link(link, _)) => in_link = Some(link),
- End(Link(_, _)) => in_link = None,
- Start(_tag) | End(_tag) => (), // We don't care about other tags
- Html(_html) | InlineHtml(_html) => (), // HTML is weird, just ignore it
- SoftBreak | HardBreak => (),
+ Start(CodeBlock(_)) => in_code = true,
+ End(CodeBlock(_)) => in_code = false,
+ Start(Link(_, url, _)) => in_link = Some(url),
+ End(Link(..)) => in_link = None,
+ Start(Heading(_)) => in_heading = true,
+ End(Heading(_)) => in_heading = false,
+ Start(_tag) | End(_tag) => (), // We don't care about other tags
+ Html(_html) => (), // HTML is weird, just ignore it
+ SoftBreak | HardBreak | TaskListMarker(_) | Code(_) | Rule => (),
FootnoteReference(text) | Text(text) => {
if Some(&text) == in_link.as_ref() {
// Probably a link of the form `<http://example.com>`
// text "http://example.com" by pulldown-cmark
continue;
}
-
- if !in_code {
- let index = match spans.binary_search_by(|c| c.0.cmp(&offset)) {
- Ok(o) => o,
- Err(e) => e - 1,
- };
-
- let (begin, span) = spans[index];
-
+ headers.safety |= in_heading && text.trim() == "Safety";
+ headers.errors |= in_heading && text.trim() == "Errors";
+ let index = match spans.binary_search_by(|c| c.0.cmp(&range.start)) {
+ Ok(o) => o,
+ Err(e) => e - 1,
+ };
+ let (begin, span) = spans[index];
+ if in_code {
+ check_code(cx, &text, span);
+ } else {
// Adjust for the beginning of the current `Event`
- let span = span.with_lo(span.lo() + BytePos::from_usize(offset - begin));
+ let span = span.with_lo(span.lo() + BytePos::from_usize(range.start - begin));
check_text(cx, valid_idents, &text, span);
}
},
}
}
+ headers
+}
+
+fn check_code(cx: &LateContext<'_, '_>, text: &str, span: Span) {
+ if text.contains("fn main() {") && !(text.contains("static") || text.contains("fn main() {}")) {
+ span_lint(cx, NEEDLESS_DOCTEST_MAIN, span, "needless `fn main` in doctest");
+ }
}
-fn check_text(cx: &EarlyContext<'_>, valid_idents: &FxHashSet<String>, text: &str, span: Span) {
+fn check_text(cx: &LateContext<'_, '_>, valid_idents: &FxHashSet<String>, text: &str, span: Span) {
for word in text.split(|c: char| c.is_whitespace() || c == '\'') {
// Trim punctuation as in `some comment (see foo::bar).`
// ^^
}
}
-fn check_word(cx: &EarlyContext<'_>, word: &str, span: Span) {
+fn check_word(cx: &LateContext<'_, '_>, word: &str, span: Span) {
/// Checks if a string is camel-case, i.e., contains at least two uppercase
- /// letters (`Clippy` is
- /// ok) and one lower-case letter (`NASA` is ok). Plural are also excluded
- /// (`IDs` is ok).
+ /// letters (`Clippy` is ok) and one lower-case letter (`NASA` is ok).
+ /// Plurals are also excluded (`IDs` is ok).
fn is_camel_case(s: &str) -> bool {
if s.starts_with(|c: char| c.is_digit(10)) {
return false;