pub use CommentStyle::*;

use rustc_span::source_map::SourceMap;
use rustc_span::{BytePos, CharPos, FileName, Pos, Symbol};
/// Relationship of a comment to the code around it; drives placement
/// decisions when comments are re-emitted.
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum CommentStyle {
    /// No code on either side of each line of the comment
    Isolated,
    /// Code exists to the left of the comment
    Trailing,
    /// Code before /* foo */ and after the comment
    Mixed,
    /// Just a manual blank line "\n\n", for layout
    BlankLine,
}
26 pub style: CommentStyle,
27 pub lines: Vec<String>,
31 pub fn is_line_doc_comment(s: &str) -> bool {
32 let res = (s.starts_with("///") && *s.as_bytes().get(3).unwrap_or(&b' ') != b'/')
33 || s.starts_with("//!");
34 debug!("is {:?} a doc comment? {}", s, res);
38 pub fn is_block_doc_comment(s: &str) -> bool {
39 // Prevent `/**/` from being parsed as a doc comment
40 let res = ((s.starts_with("/**") && *s.as_bytes().get(3).unwrap_or(&b' ') != b'*')
41 || s.starts_with("/*!"))
43 debug!("is {:?} a doc comment? {}", s, res);
47 // FIXME(#64197): Try to privatize this again.
48 pub fn is_doc_comment(s: &str) -> bool {
49 (s.starts_with("///") && is_line_doc_comment(s))
50 || s.starts_with("//!")
51 || (s.starts_with("/**") && is_block_doc_comment(s))
52 || s.starts_with("/*!")
55 pub fn doc_comment_style(comment: Symbol) -> ast::AttrStyle {
56 let comment = &comment.as_str();
57 assert!(is_doc_comment(comment));
58 if comment.starts_with("//!") || comment.starts_with("/*!") {
65 pub fn strip_doc_comment_decoration(comment: Symbol) -> String {
66 let comment = &comment.as_str();
68 /// remove whitespace-only lines from the start/end of lines
69 fn vertical_trim(lines: Vec<String>) -> Vec<String> {
71 let mut j = lines.len();
72 // first line of all-stars should be omitted
73 if !lines.is_empty() && lines[0].chars().all(|c| c == '*') {
77 while i < j && lines[i].trim().is_empty() {
80 // like the first, a last line of all stars should be omitted
81 if j > i && lines[j - 1].chars().skip(1).all(|c| c == '*') {
85 while j > i && lines[j - 1].trim().is_empty() {
92 /// remove a "[ \t]*\*" block from each line, if possible
93 fn horizontal_trim(lines: Vec<String>) -> Vec<String> {
94 let mut i = usize::MAX;
95 let mut can_trim = true;
99 for (j, c) in line.chars().enumerate() {
100 if j > i || !"* \t".contains(c) {
123 lines.iter().map(|line| (&line[i + 1..line.len()]).to_string()).collect()
129 // one-line comments lose their prefix
130 const ONELINERS: &[&str] = &["///!", "///", "//!", "//"];
132 for prefix in ONELINERS {
133 if comment.starts_with(*prefix) {
134 return (&comment[prefix.len()..]).to_string();
138 if comment.starts_with("/*") {
140 comment[3..comment.len() - 2].lines().map(|s| s.to_string()).collect::<Vec<String>>();
142 let lines = vertical_trim(lines);
143 let lines = horizontal_trim(lines);
145 return lines.join("\n");
148 panic!("not a doc-comment: {}", comment);
151 /// Returns `None` if the first `col` chars of `s` contain a non-whitespace char.
152 /// Otherwise returns `Some(k)` where `k` is first char offset after that leading
153 /// whitespace. Note that `k` may be outside bounds of `s`.
154 fn all_whitespace(s: &str, col: CharPos) -> Option<usize> {
156 for (i, ch) in s.char_indices().take(col.to_usize()) {
157 if !ch.is_whitespace() {
160 idx = i + ch.len_utf8();
165 fn trim_whitespace_prefix(s: &str, col: CharPos) -> &str {
167 match all_whitespace(&s, col) {
179 fn split_block_comment_into_lines(text: &str, col: CharPos) -> Vec<String> {
180 let mut res: Vec<String> = vec![];
181 let mut lines = text.lines();
182 // just push the first line
183 res.extend(lines.next().map(|it| it.to_string()));
184 // for other lines, strip common whitespace prefix
186 res.push(trim_whitespace_prefix(line, col).to_string())
191 // it appears this function is called only from pprust... that's
192 // probably not a good thing.
193 pub fn gather_comments(sm: &SourceMap, path: FileName, src: String) -> Vec<Comment> {
194 let sm = SourceMap::new(sm.path_mapping().clone());
195 let source_file = sm.new_source_file(path, src);
196 let text = (*source_file.src.as_ref().unwrap()).clone();
198 let text: &str = text.as_str();
199 let start_bpos = source_file.start_pos;
201 let mut comments: Vec<Comment> = Vec::new();
202 let mut code_to_the_left = false;
204 if let Some(shebang_len) = rustc_lexer::strip_shebang(text) {
205 comments.push(Comment {
207 lines: vec![text[..shebang_len].to_string()],
213 for token in rustc_lexer::tokenize(&text[pos..]) {
214 let token_text = &text[pos..pos + token.len];
216 rustc_lexer::TokenKind::Whitespace => {
217 if let Some(mut idx) = token_text.find('\n') {
218 code_to_the_left = false;
219 while let Some(next_newline) = &token_text[idx + 1..].find('\n') {
220 idx = idx + 1 + next_newline;
221 comments.push(Comment {
224 pos: start_bpos + BytePos((pos + idx) as u32),
229 rustc_lexer::TokenKind::BlockComment { terminated: _ } => {
230 if !is_block_doc_comment(token_text) {
231 let code_to_the_right = match text[pos + token.len..].chars().next() {
232 Some('\r' | '\n') => false,
235 let style = match (code_to_the_left, code_to_the_right) {
237 (false, false) => Isolated,
238 (true, false) => Trailing,
241 // Count the number of chars since the start of the line by rescanning.
242 let pos_in_file = start_bpos + BytePos(pos as u32);
243 let line_begin_in_file = source_file.line_begin_pos(pos_in_file);
244 let line_begin_pos = (line_begin_in_file - start_bpos).to_usize();
245 let col = CharPos(text[line_begin_pos..pos].chars().count());
247 let lines = split_block_comment_into_lines(token_text, col);
248 comments.push(Comment { style, lines, pos: pos_in_file })
251 rustc_lexer::TokenKind::LineComment => {
252 if !is_doc_comment(token_text) {
253 comments.push(Comment {
254 style: if code_to_the_left { Trailing } else { Isolated },
255 lines: vec![token_text.to_string()],
256 pos: start_bpos + BytePos(pos as u32),
261 code_to_the_left = true;