+// Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use crate::rustc::lint::{EarlyContext, EarlyLintPass, LintArray, LintPass};
+use crate::rustc::{declare_tool_lint, lint_array};
+use crate::syntax::ast;
+use crate::syntax::source_map::{BytePos, Span};
+use crate::syntax_pos::Pos;
+use crate::utils::span_lint;
use itertools::Itertools;
use pulldown_cmark;
-use rustc::lint::*;
-use syntax::ast;
-use syntax::codemap::{Span, BytePos};
-use syntax_pos::Pos;
-use utils::span_lint;
+use url::Url;
/// **What it does:** Checks for the presence of `_`, `::` or camel-case words
/// outside ticks in documentation.
///
/// **Why is this bad?** *Rustdoc* supports markdown formatting, `_`, `::` and
/// camel-case probably indicates some code which should be included between
/// ticks. `_` can also be used for emphasis in markdown, this lint tries to
/// consider that.
///
/// **Known problems:** Lots of bad docs won’t be fixed, what the lint checks
/// for is limited, and there are still false positives.
///
/// **Examples:**
/// ```rust
/// /// Do something with the foo_bar parameter. See also
/// /// that::other::module::foo.
/// // ^ `foo_bar` and `that::other::module::foo` should be ticked.
/// fn doit(foo_bar) { .. }
/// ```
declare_clippy_lint! {
    pub DOC_MARKDOWN,
    pedantic,
    "presence of `_`, `::` or camel-case outside backticks in documentation"
}
impl Doc {
    /// Builds the lint pass with the configured `valid_idents` — words the
    /// lint should accept without backticks. They are threaded through to
    /// `check_attrs` by the `EarlyLintPass` impl below.
    pub fn new(valid_idents: Vec<String>) -> Self {
        Self { valid_idents }
    }
}
}
impl EarlyLintPass for Doc {
- fn check_crate(&mut self, cx: &EarlyContext, krate: &ast::Crate) {
+ fn check_crate(&mut self, cx: &EarlyContext<'_>, krate: &ast::Crate) {
check_attrs(cx, &self.valid_idents, &krate.attrs);
}
- fn check_item(&mut self, cx: &EarlyContext, item: &ast::Item) {
+ fn check_item(&mut self, cx: &EarlyContext<'_>, item: &ast::Item) {
check_attrs(cx, &self.valid_idents, &item.attrs);
}
}
impl<'a> Parser<'a> {
    /// Wraps a `pulldown_cmark` Markdown parser; the `Parser` struct itself
    /// is declared elsewhere in this file.
    fn new(parser: pulldown_cmark::Parser<'a>) -> Self {
        Self { parser }
    }
}
/// `syntax::parse::lexer::comments::strip_doc_comment_decoration` because we
/// need to keep track of
/// the spans, but this function is inspired by the latter.
-#[allow(cast_possible_truncation)]
+#[allow(clippy::cast_possible_truncation)]
pub fn strip_doc_comment_decoration(comment: &str, span: Span) -> (String, Vec<(usize, Span)>) {
// one-line comments lose their prefix
- const ONELINERS: &'static [&'static str] = &["///!", "///", "//!", "//"];
+ const ONELINERS: &[&str] = &["///!", "///", "//!", "//"];
for prefix in ONELINERS {
if comment.starts_with(*prefix) {
let doc = &comment[prefix.len()..];
doc.push('\n');
return (
doc.to_owned(),
- vec![
- (
- doc.len(),
- Span {
- lo: span.lo + BytePos(prefix.len() as u32),
- ..span
- }
- ),
- ],
+ vec![(doc.len(), span.with_lo(span.lo() + BytePos(prefix.len() as u32)))],
);
}
}
debug_assert_eq!(offset as u32 as usize, offset);
contains_initial_stars |= line.trim_left().starts_with('*');
// +1 for the newline
- sizes.push((
- line.len() + 1,
- Span {
- lo: span.lo + BytePos(offset as u32),
- ..span
- },
- ));
+ sizes.push((line.len() + 1, span.with_lo(span.lo() + BytePos(offset as u32))));
}
if !contains_initial_stars {
return (doc.to_string(), sizes);
panic!("not a doc-comment: {}", comment);
}
-pub fn check_attrs<'a>(cx: &EarlyContext, valid_idents: &[String], attrs: &'a [ast::Attribute]) {
+pub fn check_attrs<'a>(cx: &EarlyContext<'_>, valid_idents: &[String], attrs: &'a [ast::Attribute]) {
let mut doc = String::new();
let mut spans = vec![];
spans.extend_from_slice(¤t_spans);
doc.push_str(¤t);
}
- } else if let Some(name) = attr.name() {
+ } else if attr.name() == "doc" {
// ignore mix of sugared and non-sugared doc
- if name == "doc" {
- return;
- }
+ return;
}
}
}
fn check_doc<'a, Events: Iterator<Item = (usize, pulldown_cmark::Event<'a>)>>(
- cx: &EarlyContext,
+ cx: &EarlyContext<'_>,
valid_idents: &[String],
docs: Events,
spans: &[(usize, Span)],
use pulldown_cmark::Tag::*;
let mut in_code = false;
+ let mut in_link = None;
for (offset, event) in docs {
match event {
- Start(CodeBlock(_)) |
- Start(Code) => in_code = true,
- End(CodeBlock(_)) |
- End(Code) => in_code = false,
- Start(_tag) | End(_tag) => (), // We don't care about other tags
- Html(_html) |
- InlineHtml(_html) => (), // HTML is weird, just ignore it
- SoftBreak => (),
- HardBreak => (),
- FootnoteReference(text) |
- Text(text) => {
+ Start(CodeBlock(_)) | Start(Code) => in_code = true,
+ End(CodeBlock(_)) | End(Code) => in_code = false,
+ Start(Link(link, _)) => in_link = Some(link),
+ End(Link(_, _)) => in_link = None,
+ Start(_tag) | End(_tag) => (), // We don't care about other tags
+ Html(_html) | InlineHtml(_html) => (), // HTML is weird, just ignore it
+ SoftBreak | HardBreak => (),
+ FootnoteReference(text) | Text(text) => {
+ if Some(&text) == in_link.as_ref() {
+ // Probably a link of the form `<http://example.com>`
+ // Which are represented as a link to "http://example.com" with
+ // text "http://example.com" by pulldown-cmark
+ continue;
+ }
+
if !in_code {
let index = match spans.binary_search_by(|c| c.0.cmp(&offset)) {
Ok(o) => o,
let (begin, span) = spans[index];
- // Adjust for the begining of the current `Event`
- let span = Span {
- lo: span.lo + BytePos::from_usize(offset - begin),
- ..span
- };
+ // Adjust for the beginning of the current `Event`
+ let span = span.with_lo(span.lo() + BytePos::from_usize(offset - begin));
check_text(cx, valid_idents, &text, span);
}
}
}
-fn check_text(cx: &EarlyContext, valid_idents: &[String], text: &str, span: Span) {
+fn check_text(cx: &EarlyContext<'_>, valid_idents: &[String], text: &str, span: Span) {
for word in text.split_whitespace() {
// Trim punctuation as in `some comment (see foo::bar).`
// ^^
// Adjust for the current word
let offset = word.as_ptr() as usize - text.as_ptr() as usize;
- let span = Span {
- lo: span.lo + BytePos::from_usize(offset),
- hi: span.lo + BytePos::from_usize(offset + word.len()),
- ..span
- };
+ let span = Span::new(
+ span.lo() + BytePos::from_usize(offset),
+ span.lo() + BytePos::from_usize(offset + word.len()),
+ span.ctxt(),
+ );
check_word(cx, word, span);
}
}
-fn check_word(cx: &EarlyContext, word: &str, span: Span) {
+fn check_word(cx: &EarlyContext<'_>, word: &str, span: Span) {
    /// Checks if a string is camel-case, i.e. contains at least two uppercase
    /// letters (`Clippy` is ok) and one lower-case letter (`NASA` is ok).
    /// Plurals are also excluded
return false;
}
- let s = if s.ends_with('s') {
- &s[..s.len() - 1]
- } else {
- s
- };
+ let s = if s.ends_with('s') { &s[..s.len() - 1] } else { s };
- s.chars().all(char::is_alphanumeric) && s.chars().filter(|&c| c.is_uppercase()).take(2).count() > 1 &&
- s.chars().filter(|&c| c.is_lowercase()).take(1).count() > 0
+ s.chars().all(char::is_alphanumeric)
+ && s.chars().filter(|&c| c.is_uppercase()).take(2).count() > 1
+ && s.chars().filter(|&c| c.is_lowercase()).take(1).count() > 0
}
fn has_underscore(s: &str) -> bool {
s != "_" && !s.contains("\\_") && s.contains('_')
}
+ if let Ok(url) = Url::parse(word) {
+ // try to get around the fact that `foo::bar` parses as a valid URL
+ if !url.cannot_be_a_base() {
+ span_lint(
+ cx,
+ DOC_MARKDOWN,
+ span,
+ "you should put bare URLs between `<`/`>` or make a proper Markdown link",
+ );
+
+ return;
+ }
+ }
+
if has_underscore(word) || word.contains("::") || is_camel_case(word) {
span_lint(
cx,