X-Git-Url: https://git.lizzy.rs/?a=blobdiff_plain;f=clippy_lints%2Fsrc%2Fdoc.rs;h=6dbf2ea959e6a667f1be86e3a52079e48a3bf570;hb=93c48a0977927fc3678600d0d79ef43e8f30761f;hp=ed6b3a978a6f8fb937b895c037e9c1e1184707a6;hpb=aaf9bce90550b1c0a726fbf42bd317dc2ff8e6ed;p=rust.git

diff --git a/clippy_lints/src/doc.rs b/clippy_lints/src/doc.rs
index ed6b3a978a6..6dbf2ea959e 100644
--- a/clippy_lints/src/doc.rs
+++ b/clippy_lints/src/doc.rs
@@ -4,7 +4,7 @@
 use syntax::ast;
 use syntax::codemap::{Span, BytePos};
 use syntax_pos::Pos;
-use utils::{span_lint, snippet_opt};
+use utils::span_lint;
 
 /// **What it does:** Checks for the presence of `_`, `::` or camel-case words
 /// outside ticks in documentation.
@@ -19,7 +19,8 @@
 ///
 /// **Examples:**
 /// ```rust
-/// /// Do something with the foo_bar parameter. See also that::other::module::foo.
+/// /// Do something with the foo_bar parameter. See also
+/// that::other::module::foo.
 /// // ^ `foo_bar` and `that::other::module::foo` should be ticked.
 /// fn doit(foo_bar) { .. }
 /// ```
@@ -62,7 +63,7 @@ struct Parser<'a> {
 
 impl<'a> Parser<'a> {
     fn new(parser: pulldown_cmark::Parser<'a>) -> Parser<'a> {
-        Self { parser }
+        Self { parser: parser }
     }
 }
 
@@ -78,10 +79,11 @@ fn next(&mut self) -> Option<Self::Item> {
 /// Cleanup documentation decoration (`///` and such).
 ///
 /// We can't use `syntax::attr::AttributeMethods::with_desugared_doc` or
-/// `syntax::parse::lexer::comments::strip_doc_comment_decoration` because we need to keep track of
+/// `syntax::parse::lexer::comments::strip_doc_comment_decoration` because we
+/// need to keep track of
 /// the spans but this function is inspired from the later.
 #[allow(cast_possible_truncation)]
-pub fn strip_doc_comment_decoration(comment: String, span: Span) -> (String, Vec<(usize, Span)>) {
+pub fn strip_doc_comment_decoration(comment: &str, span: Span) -> (String, Vec<(usize, Span)>) {
     // one-line comments lose their prefix
     const ONELINERS: &'static [&'static str] = &["///!", "///", "//!", "//"];
     for prefix in ONELINERS {
@@ -91,7 +93,15 @@ pub fn strip_doc_comment_decoration(comment: String, span: Span) -> (String, Vec
             doc.push('\n');
             return (
                 doc.to_owned(),
-                vec![(doc.len(), Span { lo: span.lo + BytePos(prefix.len() as u32), ..span })]
+                vec![
+                    (
+                        doc.len(),
+                        Span {
+                            lo: span.lo + BytePos(prefix.len() as u32),
+                            ..span
+                        }
+                    ),
+                ],
             );
         }
     }
@@ -99,15 +109,39 @@ pub fn strip_doc_comment_decoration(comment: String, span: Span) -> (String, Vec
     if comment.starts_with("/*") {
         let doc = &comment[3..comment.len() - 2];
         let mut sizes = vec![];
-
+        let mut contains_initial_stars = false;
         for line in doc.lines() {
             let offset = line.as_ptr() as usize - comment.as_ptr() as usize;
             debug_assert_eq!(offset as u32 as usize, offset);
-
-            sizes.push((line.len(), Span { lo: span.lo + BytePos(offset as u32), ..span }));
+            contains_initial_stars |= line.trim_left().starts_with('*');
+            // +1 for the newline
+            sizes.push((
+                line.len() + 1,
+                Span {
+                    lo: span.lo + BytePos(offset as u32),
+                    ..span
+                },
+            ));
         }
-
-        return (doc.to_string(), sizes);
+        if !contains_initial_stars {
+            return (doc.to_string(), sizes);
+        }
+        // remove the initial '*'s if any
+        let mut no_stars = String::with_capacity(doc.len());
+        for line in doc.lines() {
+            let mut chars = line.chars();
+            while let Some(c) = chars.next() {
+                if c.is_whitespace() {
+                    no_stars.push(c);
+                } else {
+                    no_stars.push(if c == '*' { ' ' } else { c });
+                    break;
+                }
+            }
+            no_stars.push_str(chars.as_str());
+            no_stars.push('\n');
+        }
+        return (no_stars, sizes);
     }
 
     panic!("not a doc-comment: {}", comment);
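Note: the hunks above change `strip_doc_comment_decoration` to borrow the comment (`&str` instead of `String`) and to handle `/** ... */` block comments whose lines carry a decorative leading `*`: the first non-whitespace character of each line is replaced by a space when it is a `*`, so line lengths, and therefore the recorded sizes and spans, stay unchanged. A minimal standalone sketch of that star-stripping loop outside clippy; the function name `strip_initial_stars` and the `main` driver are illustrative and not part of the patch:

// Sketch of the star-stripping pass added to the block-comment branch above.
// For each line it copies leading whitespace, swaps the first non-whitespace
// character for a space if it is a '*', and keeps the rest of the line.
fn strip_initial_stars(doc: &str) -> String {
    let mut no_stars = String::with_capacity(doc.len());
    for line in doc.lines() {
        let mut chars = line.chars();
        while let Some(c) = chars.next() {
            if c.is_whitespace() {
                no_stars.push(c);
            } else {
                no_stars.push(if c == '*' { ' ' } else { c });
                break;
            }
        }
        no_stars.push_str(chars.as_str());
        no_stars.push('\n');
    }
    no_stars
}

fn main() {
    // The body of a `/** ... */` doc comment, as the branch above sees it
    // after the `/**` and `*/` markers have been sliced off.
    let body = "\n * First line\n * Second line";
    assert_eq!(strip_initial_stars(body), "\n   First line\n   Second line\n");
    println!("{:?}", strip_initial_stars(body));
}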
@@ -121,10 +155,15 @@ pub fn check_attrs<'a>(cx: &EarlyContext, valid_idents: &[String], attrs: &'a [a
         if attr.is_sugared_doc {
             if let Some(ref current) = attr.value_str() {
                 let current = current.to_string();
-                let (current, current_spans) = strip_doc_comment_decoration(current, attr.span);
+                let (current, current_spans) = strip_doc_comment_decoration(&current, attr.span);
                 spans.extend_from_slice(&current_spans);
                 doc.push_str(&current);
             }
+        } else if let Some(name) = attr.name() {
+            // ignore mix of sugared and non-sugared doc
+            if name == "doc" {
+                return;
+            }
         }
     }
 
@@ -144,7 +183,11 @@ pub fn check_attrs<'a>(cx: &EarlyContext, valid_idents: &[String], attrs: &'a [a
         let y_offset = y.0;
 
         match (x.1, y.1) {
-            (Text(x), Text(y)) => Ok((x_offset, Text((x.into_owned() + &y).into()))),
+            (Text(x), Text(y)) => {
+                let mut x = x.into_owned();
+                x.push_str(&y);
+                Ok((x_offset, Text(x.into())))
+            },
             (x, y) => Err(((x_offset, x), (y_offset, y))),
         }
     });
@@ -152,46 +195,44 @@ pub fn check_attrs<'a>(cx: &EarlyContext, valid_idents: &[String], attrs: &'a [a
     }
 }
 
-fn check_doc<'a, Events: Iterator<Item=(usize, pulldown_cmark::Event<'a>)>>(
+fn check_doc<'a, Events: Iterator<Item = (usize, pulldown_cmark::Event<'a>)>>(
     cx: &EarlyContext,
     valid_idents: &[String],
     docs: Events,
-    spans: &[(usize, Span)]
+    spans: &[(usize, Span)],
 ) {
     use pulldown_cmark::Event::*;
     use pulldown_cmark::Tag::*;
 
     let mut in_code = false;
 
-    println!("{:?}", spans);
     for (offset, event) in docs {
-        println!("{:?}, {:?}", offset, event);
         match event {
-            Start(CodeBlock(_)) | Start(Code) => in_code = true,
-            End(CodeBlock(_)) | End(Code) => in_code = false,
+            Start(CodeBlock(_)) |
+            Start(Code) => in_code = true,
+            End(CodeBlock(_)) |
+            End(Code) => in_code = false,
             Start(_tag) | End(_tag) => (), // We don't care about other tags
-            Html(_html) | InlineHtml(_html) => (), // HTML is weird, just ignore it
-            FootnoteReference(footnote) => (), // TODO
+            Html(_html) |
+            InlineHtml(_html) => (), // HTML is weird, just ignore it
             SoftBreak => (),
             HardBreak => (),
+            FootnoteReference(text) |
             Text(text) => {
                 if !in_code {
                     let index = match spans.binary_search_by(|c| c.0.cmp(&offset)) {
                         Ok(o) => o,
-                        Err(e) => e-1,
+                        Err(e) => e - 1,
                     };
 
                     let (begin, span) = spans[index];
 
-                    println!("raw: {:?}, {}, {}, {:?}", snippet_opt(cx, span), offset, begin, span);
-
                     // Adjust for the begining of the current `Event`
                     let span = Span {
                         lo: span.lo + BytePos::from_usize(offset - begin),
                         ..span
                     };
 
-                    println!("adjusted: {:?}", snippet_opt(cx, span));
                     check_text(cx, valid_idents, &text, span);
                 }
             },
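Note: the hunks above drop the leftover debug `println!` calls (which is why the `snippet_opt` import went away in the first hunk), start treating `FootnoteReference` text like ordinary `Text`, and make `check_attrs` bail out when sugared `///` docs are mixed with raw `#[doc]` attributes. The span bookkeeping itself is unchanged: `spans` holds one `(offset, Span)` pair per doc-comment fragment, and a binary search maps the byte offset reported by pulldown-cmark back to the fragment it came from. A small sketch of that lookup with plain integers standing in for `Span`; the `Frag` type and the `adjust` name are illustrative, not clippy's API:

// Each doc-comment fragment contributes one entry: its starting offset inside
// the concatenated markdown string, plus where it lives in the source.
#[derive(Clone, Copy)]
struct Frag {
    begin: usize, // offset of this fragment in the concatenated doc text
    lo: usize,    // stand-in for the fragment's span start in the source
}

// Mirrors the lookup in `check_doc`: binary-search for the fragment that
// contains `offset`, then shift its start by the distance into the fragment.
fn adjust(frags: &[Frag], offset: usize) -> usize {
    let index = match frags.binary_search_by(|f| f.begin.cmp(&offset)) {
        Ok(o) => o,
        Err(e) => e - 1, // offset falls inside the previous fragment
    };
    let frag = frags[index];
    frag.lo + (offset - frag.begin)
}

fn main() {
    // Two fragments: bytes 0..10 of the doc text start at source position 100,
    // bytes from 10 onwards start at source position 300.
    let frags = [Frag { begin: 0, lo: 100 }, Frag { begin: 10, lo: 300 }];
    assert_eq!(adjust(&frags, 7), 107);
    assert_eq!(adjust(&frags, 12), 302);
    println!("span adjustment works as in check_doc");
}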
@@ -223,8 +264,10 @@ fn check_text(cx: &EarlyContext, valid_idents: &[String], text: &str, span: Span
 }
 fn check_word(cx: &EarlyContext, word: &str, span: Span) {
-    /// Checks if a string is camel-case, ie. contains at least two uppercase letter (`Clippy` is
-    /// ok) and one lower-case letter (`NASA` is ok). Plural are also excluded (`IDs` is ok).
+    /// Checks if a string is camel-case, ie. contains at least two uppercase
+    /// letter (`Clippy` is
+    /// ok) and one lower-case letter (`NASA` is ok). Plural are also excluded
+    /// (`IDs` is ok).
     fn is_camel_case(s: &str) -> bool {
         if s.starts_with(|c: char| c.is_digit(10)) {
             return false;
         }
@@ -237,7 +280,7 @@ fn is_camel_case(s: &str) -> bool {
         };
         s.chars().all(char::is_alphanumeric) &&
         s.chars().filter(|&c| c.is_uppercase()).take(2).count() > 1 &&
-        s.chars().filter(|&c| c.is_lowercase()).take(1).count() > 0
+            s.chars().filter(|&c| c.is_lowercase()).take(1).count() > 0
     }
 
     fn has_underscore(s: &str) -> bool {
@@ -245,9 +288,11 @@ fn has_underscore(s: &str) -> bool {
     }
 
     if has_underscore(word) || word.contains("::") || is_camel_case(word) {
-        span_lint(cx,
-                  DOC_MARKDOWN,
-                  span,
-                  &format!("you should put `{}` between ticks in the documentation", word));
+        span_lint(
+            cx,
+            DOC_MARKDOWN,
+            span,
+            &format!("you should put `{}` between ticks in the documentation", word),
+        );
     }
 }
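Note: the final hunks only re-wrap the doc comment and reformat the `span_lint` call; the lint's heuristic is unchanged. A word is flagged when it contains `_`, contains `::`, or is camel-case in the sense of `is_camel_case`: at least two uppercase letters and at least one lowercase letter, with a trailing plural `s` ignored and words starting with a digit rejected, so `Clippy`, `NASA` and `IDs` are left alone while `FooBar` is flagged. A runnable sketch of the whole predicate; the plural-stripping line and the body of `has_underscore` are not visible in the diff, so those parts are a plausible reconstruction, and the `main` harness is illustrative:

/// Sketch of the lint's camel-case test. The digit check and the final three
/// lines appear in the diff above.
fn is_camel_case(s: &str) -> bool {
    if s.starts_with(|c: char| c.is_digit(10)) {
        return false;
    }
    // Reconstructed: drop one trailing 's' so `IDs` is treated like `ID`.
    let s = if s.ends_with('s') { &s[..s.len() - 1] } else { s };
    s.chars().all(char::is_alphanumeric) &&
        s.chars().filter(|&c| c.is_uppercase()).take(2).count() > 1 &&
        s.chars().filter(|&c| c.is_lowercase()).take(1).count() > 0
}

// Only the signature of `has_underscore` is shown in the diff; this body is a
// plausible stand-in that flags words like `foo_bar` but not a lone `_`.
fn has_underscore(s: &str) -> bool {
    s.len() > 1 && s != "_" && s.contains('_')
}

fn main() {
    let words = ["foo_bar", "that::other::module::foo", "FooBar", "Clippy", "NASA", "IDs", "hello"];
    for word in &words {
        let needs_ticks = has_underscore(word) || word.contains("::") || is_camel_case(word);
        println!("{:<28} needs ticks: {}", word, needs_ticks);
    }
}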