//! This module uses libsyntax's lexer to provide token-based highlighting for
//! the HTML documentation generated by rustdoc.
//!
-//! If you just want to syntax highlighting for a Rust program, then you can use
-//! the `render_inner_with_highlighting` or `render_with_highlighting`
-//! functions. For more advanced use cases (if you want to supply your own css
-//! classes or control how the HTML is generated, or even generate something
-//! other then HTML), then you should implement the `Writer` trait and use a
-//! `Classifier`.
+//! Use the `render_with_highlighting` function to highlight some Rust code.
use html::escape::Escape;
use syntax_pos::{Span, FileName};
/// Highlights `src`, returning the HTML output.
-pub fn render_with_highlighting(src: &str, class: Option<&str>, id: Option<&str>,
+pub fn render_with_highlighting(src: &str, class: Option<&str>,
extension: Option<&str>,
tooltip: Option<(&str, &str)>) -> String {
debug!("highlighting: ================\n{}\n==============", src);
class='tooltiptext'>{}</span></div></div>",
class, tooltip).unwrap();
}
- write_header(class, id, &mut out).unwrap();
+ write_header(class, &mut out).unwrap();
let mut classifier = Classifier::new(lexer::StringReader::new(&sess, fm, None), sess.codemap());
if let Err(_) = classifier.write_source(&mut out) {
/// Processes a program (nested in the internal `lexer`), classifying strings of
/// text by highlighting category (`Class`). Calls out to a `Writer` to write
/// each span of text in sequence.
-pub struct Classifier<'a> {
+struct Classifier<'a> {
lexer: lexer::StringReader<'a>,
codemap: &'a CodeMap,
/// How a span of text is classified. Mostly corresponds to token kinds.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
-pub enum Class {
+enum Class {
None,
Comment,
DocComment,
/// The classifier will call into the `Writer` implementation as it finds spans
/// of text to highlight. Exactly how that text should be highlighted is up to
/// the implementation.
-pub trait Writer {
+trait Writer {
/// Called when we start processing a span of text that should be highlighted.
/// The `Class` argument specifies how it should be highlighted.
fn enter_span(&mut self, _: Class) -> io::Result<()>;
/// Called at the end of a span of highlighted text.
fn exit_span(&mut self) -> io::Result<()>;
- /// Called for a span of text, usually, but not always, a single token. If
- /// the string of text (`T`) does correspond to a token, then the token will
- /// also be passed. If the text should be highlighted differently from the
- /// surrounding text, then the `Class` argument will be a value other than
- /// `None`.
+ /// Called for a span of text. If the text should be highlighted differently from the
+ /// surrounding text, then the `Class` argument will be a value other than `None`.
+ ///
/// The following sequences of callbacks are equivalent:
/// ```plain
/// enter_span(Foo), string("text", None), exit_span()
/// more flexible.
fn string<T: Display>(&mut self,
text: T,
- klass: Class,
- tok: Option<&TokenAndSpan>)
+ klass: Class)
-> io::Result<()>;
}
impl<U: Write> Writer for U {
fn string<T: Display>(&mut self,
text: T,
- klass: Class,
- _tas: Option<&TokenAndSpan>)
+ klass: Class)
-> io::Result<()> {
match klass {
Class::None => write!(self, "{}", text),
}
impl<'a> Classifier<'a> {
- pub fn new(lexer: lexer::StringReader<'a>, codemap: &'a CodeMap) -> Classifier<'a> {
+ fn new(lexer: lexer::StringReader<'a>, codemap: &'a CodeMap) -> Classifier<'a> {
Classifier {
lexer,
codemap,
/// is used. All source code emission is done as slices from the source map,
/// not from the tokens themselves, in order to stay true to the original
/// source.
- pub fn write_source<W: Writer>(&mut self,
+ fn write_source<W: Writer>(&mut self,
out: &mut W)
-> io::Result<()> {
loop {
-> io::Result<()> {
let klass = match tas.tok {
token::Shebang(s) => {
- out.string(Escape(&s.as_str()), Class::None, Some(&tas))?;
+ out.string(Escape(&s.as_str()), Class::None)?;
return Ok(());
},
self.in_attribute = true;
out.enter_span(Class::Attribute)?;
}
- out.string("#", Class::None, None)?;
- out.string("!", Class::None, None)?;
+ out.string("#", Class::None)?;
+ out.string("!", Class::None)?;
return Ok(());
}
self.in_attribute = true;
out.enter_span(Class::Attribute)?;
}
- out.string("#", Class::None, None)?;
+ out.string("#", Class::None)?;
return Ok(());
}
token::CloseDelim(token::Bracket) => {
if self.in_attribute {
self.in_attribute = false;
- out.string("]", Class::None, None)?;
+ out.string("]", Class::None)?;
out.exit_span()?;
return Ok(());
} else {
// Anything that didn't return above is the simple case where the
// class just spans a single token, so we can use the `string` method.
- out.string(Escape(&self.snip(tas.sp)), klass, Some(&tas))
+ out.string(Escape(&self.snip(tas.sp)), klass)
}
// Helper function to get a snippet from the codemap.
impl Class {
/// Returns the css class expected by rustdoc for each `Class`.
- pub fn rustdoc_class(self) -> &'static str {
+ fn rustdoc_class(self) -> &'static str {
match self {
Class::None => "",
Class::Comment => "comment",
}
}
-fn write_header(class: Option<&str>,
-                id: Option<&str>,
-                out: &mut dyn Write)
-                -> io::Result<()> {
-    write!(out, "<pre ")?;
-    if let Some(id) = id {
-        write!(out, "id='{}' ", id)?;
-    }
-    write!(out, "class=\"rust {}\">\n", class.unwrap_or(""))
+/// Writes the opening `<pre>` tag for a highlighted code block, tagging it
+/// with the `rust` CSS class plus any extra `class` supplied by the caller.
+fn write_header(class: Option<&str>, out: &mut dyn Write) -> io::Result<()> {
+    write!(out, "<pre class=\"rust {}\">\n", class.unwrap_or(""))
}
fn write_footer(out: &mut dyn Write) -> io::Result<()> {