2019-12-06 00:00:23 +00:00
|
|
|
|
use crate::utils::{match_type, paths, return_ty, span_lint};
|
2018-11-27 20:14:15 +00:00
|
|
|
|
use itertools::Itertools;
|
|
|
|
|
use pulldown_cmark;
|
2019-10-02 15:19:30 +00:00
|
|
|
|
use rustc::hir;
|
2019-12-03 23:16:03 +00:00
|
|
|
|
use rustc::impl_lint_pass;
|
2019-10-02 15:19:30 +00:00
|
|
|
|
use rustc::lint::{LateContext, LateLintPass, LintArray, LintPass};
|
2018-12-29 17:07:10 +00:00
|
|
|
|
use rustc_data_structures::fx::FxHashSet;
|
2019-12-03 23:16:03 +00:00
|
|
|
|
use rustc_session::declare_tool_lint;
|
2019-04-24 05:47:01 +00:00
|
|
|
|
use std::ops::Range;
|
2019-11-07 08:34:45 +00:00
|
|
|
|
use syntax::ast::{AttrKind, Attribute};
|
2019-12-06 00:00:23 +00:00
|
|
|
|
use syntax::source_map::{BytePos, MultiSpan, Span};
|
2018-12-29 15:04:45 +00:00
|
|
|
|
use syntax_pos::Pos;
|
2017-06-19 19:23:50 +00:00
|
|
|
|
use url::Url;
|
2016-03-19 16:59:12 +00:00
|
|
|
|
|
2018-03-28 13:24:26 +00:00
|
|
|
|
declare_clippy_lint! {
    /// **What it does:** Checks for the presence of `_`, `::` or camel-case words
    /// outside ticks in documentation.
    ///
    /// **Why is this bad?** *Rustdoc* supports markdown formatting, `_`, `::` and
    /// camel-case probably indicates some code which should be included between
    /// ticks. `_` can also be used for emphasis in markdown, this lint tries to
    /// consider that.
    ///
    /// **Known problems:** Lots of bad docs won’t be fixed, what the lint checks
    /// for is limited, and there are still false positives.
    ///
    /// **Examples:**
    /// ```rust
    /// /// Do something with the foo_bar parameter. See also
    /// /// that::other::module::foo.
    /// // ^ `foo_bar` and `that::other::module::foo` should be ticked.
    /// fn doit(foo_bar: usize) {}
    /// ```
    pub DOC_MARKDOWN,
    pedantic,
    "presence of `_`, `::` or camel-case outside backticks in documentation"
}
|
|
|
|
|
|
2019-09-11 16:39:02 +00:00
|
|
|
|
declare_clippy_lint! {
    /// **What it does:** Checks for the doc comments of publicly visible
    /// unsafe functions and warns if there is no `# Safety` section.
    ///
    /// **Why is this bad?** Unsafe functions should document their safety
    /// preconditions, so that users can be sure they are using them safely.
    ///
    /// **Known problems:** None.
    ///
    /// **Examples:**
    /// ```rust
    ///# type Universe = ();
    /// /// This function should really be documented
    /// pub unsafe fn start_apocalypse(u: &mut Universe) {
    ///     unimplemented!();
    /// }
    /// ```
    ///
    /// At least write a line about safety:
    ///
    /// ```rust
    ///# type Universe = ();
    /// /// # Safety
    /// ///
    /// /// This function should not be called before the horsemen are ready.
    /// pub unsafe fn start_apocalypse(u: &mut Universe) {
    ///     unimplemented!();
    /// }
    /// ```
    pub MISSING_SAFETY_DOC,
    style,
    "`pub unsafe fn` without `# Safety` docs"
}
|
|
|
|
|
|
2019-12-06 00:00:23 +00:00
|
|
|
|
declare_clippy_lint! {
    /// **What it does:** Checks the doc comments of publicly visible functions that
    /// return a `Result` type and warns if there is no `# Errors` section.
    ///
    /// **Why is this bad?** Documenting the type of errors that can be returned from a
    /// function can help callers write code to handle the errors appropriately.
    ///
    /// **Known problems:** None.
    ///
    /// **Examples:**
    ///
    /// Since the following function returns a `Result` it has an `# Errors` section in
    /// its doc comment:
    ///
    /// ```rust
    ///# use std::io;
    /// /// # Errors
    /// ///
    /// /// Will return `Err` if `filename` does not exist or the user does not have
    /// /// permission to read it.
    /// pub fn read(filename: String) -> io::Result<String> {
    ///     unimplemented!();
    /// }
    /// ```
    pub MISSING_ERRORS_DOC,
    pedantic,
    "`pub fn` returns `Result` without `# Errors` in doc comment"
}
|
|
|
|
|
|
2019-09-30 22:10:24 +00:00
|
|
|
|
declare_clippy_lint! {
    /// **What it does:** Checks for `fn main() { .. }` in doctests
    ///
    /// **Why is this bad?** The test can be shorter (and likely more readable)
    /// if the `fn main()` is left implicit.
    ///
    /// **Known problems:** None.
    ///
    /// **Examples:**
    /// ``````rust
    /// /// An example of a doctest with a `main()` function
    /// ///
    /// /// # Examples
    /// ///
    /// /// ```
    /// /// fn main() {
    /// ///     // this needs not be in an `fn`
    /// /// }
    /// /// ```
    /// fn needless_main() {
    ///     unimplemented!();
    /// }
    /// ``````
    pub NEEDLESS_DOCTEST_MAIN,
    style,
    "presence of `fn main() {` in code examples"
}
|
|
|
|
|
|
2019-04-08 20:43:55 +00:00
|
|
|
|
/// Lint pass state shared by all the doc lints in this module.
#[allow(clippy::module_name_repetitions)]
#[derive(Clone)]
pub struct DocMarkdown {
    /// Words that `check_text` will never flag, even though they look like
    /// identifiers (supplied by the caller of `DocMarkdown::new`).
    valid_idents: FxHashSet<String>,
    /// Set while visiting the items of a trait `impl` block; when true,
    /// `check_impl_item` skips the missing-header lints for its methods.
    in_trait_impl: bool,
}
|
|
|
|
|
|
2019-04-08 20:43:55 +00:00
|
|
|
|
impl DocMarkdown {
|
2018-12-29 17:07:10 +00:00
|
|
|
|
pub fn new(valid_idents: FxHashSet<String>) -> Self {
|
2019-10-02 15:19:30 +00:00
|
|
|
|
Self {
|
|
|
|
|
valid_idents,
|
|
|
|
|
in_trait_impl: false,
|
|
|
|
|
}
|
2016-04-04 18:18:17 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
2016-03-19 16:59:12 +00:00
|
|
|
|
|
2019-12-06 00:00:23 +00:00
|
|
|
|
// Register all four doc lints as emitted by this single pass.
impl_lint_pass!(DocMarkdown => [DOC_MARKDOWN, MISSING_SAFETY_DOC, MISSING_ERRORS_DOC, NEEDLESS_DOCTEST_MAIN]);
|
2016-03-19 16:59:12 +00:00
|
|
|
|
|
2019-10-02 15:19:30 +00:00
|
|
|
|
impl<'a, 'tcx> LateLintPass<'a, 'tcx> for DocMarkdown {
    fn check_crate(&mut self, cx: &LateContext<'a, 'tcx>, krate: &'tcx hir::Crate<'_>) {
        // Also lint the crate-level (`//!`) documentation.
        check_attrs(cx, &self.valid_idents, &krate.attrs);
    }

    fn check_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx hir::Item<'_>) {
        let headers = check_attrs(cx, &self.valid_idents, &item.attrs);
        match item.kind {
            hir::ItemKind::Fn(ref sig, ..) => {
                lint_for_missing_headers(cx, item.hir_id, item.span, sig, headers);
            },
            hir::ItemKind::Impl(_, _, _, _, ref trait_ref, ..) => {
                // Remember whether this impl implements a trait; while true,
                // `check_impl_item` skips its methods (trait items themselves
                // are linted in `check_trait_item`).
                self.in_trait_impl = trait_ref.is_some();
            },
            _ => {},
        }
    }

    fn check_item_post(&mut self, _cx: &LateContext<'a, 'tcx>, item: &'tcx hir::Item<'_>) {
        // Leaving any impl block resets the flag set in `check_item`.
        if let hir::ItemKind::Impl(..) = item.kind {
            self.in_trait_impl = false;
        }
    }

    fn check_trait_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx hir::TraitItem<'_>) {
        let headers = check_attrs(cx, &self.valid_idents, &item.attrs);
        if let hir::TraitItemKind::Method(ref sig, ..) = item.kind {
            lint_for_missing_headers(cx, item.hir_id, item.span, sig, headers);
        }
    }

    fn check_impl_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx hir::ImplItem<'_>) {
        let headers = check_attrs(cx, &self.valid_idents, &item.attrs);
        if self.in_trait_impl {
            // Methods inside a trait impl are skipped; the corresponding
            // trait items were already linted by `check_trait_item`.
            return;
        }
        if let hir::ImplItemKind::Method(ref sig, ..) = item.kind {
            lint_for_missing_headers(cx, item.hir_id, item.span, sig, headers);
        }
    }
}
|
|
|
|
|
|
2019-12-06 00:00:23 +00:00
|
|
|
|
fn lint_for_missing_headers<'a, 'tcx>(
|
|
|
|
|
cx: &LateContext<'a, 'tcx>,
|
|
|
|
|
hir_id: hir::HirId,
|
|
|
|
|
span: impl Into<MultiSpan> + Copy,
|
|
|
|
|
sig: &hir::FnSig,
|
|
|
|
|
headers: DocHeaders,
|
|
|
|
|
) {
|
|
|
|
|
if !cx.access_levels.is_exported(hir_id) {
|
|
|
|
|
return; // Private functions do not require doc comments
|
|
|
|
|
}
|
|
|
|
|
if !headers.safety && sig.header.unsafety == hir::Unsafety::Unsafe {
|
|
|
|
|
span_lint(
|
|
|
|
|
cx,
|
|
|
|
|
MISSING_SAFETY_DOC,
|
|
|
|
|
span,
|
|
|
|
|
"unsafe function's docs miss `# Safety` section",
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
if !headers.errors && match_type(cx, return_ty(cx, hir_id), &paths::RESULT) {
|
|
|
|
|
span_lint(
|
|
|
|
|
cx,
|
|
|
|
|
MISSING_ERRORS_DOC,
|
|
|
|
|
span,
|
|
|
|
|
"docs for function returning `Result` missing `# Errors` section",
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-07-08 16:18:45 +00:00
|
|
|
|
/// Cleanup documentation decoration (`///` and such).
|
|
|
|
|
///
|
|
|
|
|
/// We can't use `syntax::attr::AttributeMethods::with_desugared_doc` or
|
2017-08-09 07:30:56 +00:00
|
|
|
|
/// `syntax::parse::lexer::comments::strip_doc_comment_decoration` because we
|
|
|
|
|
/// need to keep track of
|
2017-05-28 22:12:43 +00:00
|
|
|
|
/// the spans but this function is inspired from the later.
|
2018-08-01 20:48:41 +00:00
|
|
|
|
#[allow(clippy::cast_possible_truncation)]
|
2019-09-18 06:37:41 +00:00
|
|
|
|
#[must_use]
|
2017-05-30 17:28:44 +00:00
|
|
|
|
pub fn strip_doc_comment_decoration(comment: &str, span: Span) -> (String, Vec<(usize, Span)>) {
|
2016-07-08 16:18:45 +00:00
|
|
|
|
// one-line comments lose their prefix
|
2017-10-20 12:41:24 +00:00
|
|
|
|
const ONELINERS: &[&str] = &["///!", "///", "//!", "//"];
|
2016-07-08 16:18:45 +00:00
|
|
|
|
for prefix in ONELINERS {
|
|
|
|
|
if comment.starts_with(*prefix) {
|
2017-05-28 22:12:43 +00:00
|
|
|
|
let doc = &comment[prefix.len()..];
|
|
|
|
|
let mut doc = doc.to_owned();
|
|
|
|
|
doc.push('\n');
|
2017-08-09 07:30:56 +00:00
|
|
|
|
return (
|
|
|
|
|
doc.to_owned(),
|
2018-11-27 20:14:15 +00:00
|
|
|
|
vec![(doc.len(), span.with_lo(span.lo() + BytePos(prefix.len() as u32)))],
|
2017-08-09 07:30:56 +00:00
|
|
|
|
);
|
2016-07-08 16:18:45 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if comment.starts_with("/*") {
|
2017-05-28 22:12:43 +00:00
|
|
|
|
let doc = &comment[3..comment.len() - 2];
|
|
|
|
|
let mut sizes = vec![];
|
2017-08-19 20:52:49 +00:00
|
|
|
|
let mut contains_initial_stars = false;
|
2017-05-28 22:12:43 +00:00
|
|
|
|
for line in doc.lines() {
|
|
|
|
|
let offset = line.as_ptr() as usize - comment.as_ptr() as usize;
|
|
|
|
|
debug_assert_eq!(offset as u32 as usize, offset);
|
2018-12-14 11:35:44 +00:00
|
|
|
|
contains_initial_stars |= line.trim_start().starts_with('*');
|
2017-05-30 17:28:44 +00:00
|
|
|
|
// +1 for the newline
|
2017-09-03 21:15:15 +00:00
|
|
|
|
sizes.push((line.len() + 1, span.with_lo(span.lo() + BytePos(offset as u32))));
|
2017-05-28 22:12:43 +00:00
|
|
|
|
}
|
2017-08-19 20:52:49 +00:00
|
|
|
|
if !contains_initial_stars {
|
|
|
|
|
return (doc.to_string(), sizes);
|
|
|
|
|
}
|
|
|
|
|
// remove the initial '*'s if any
|
|
|
|
|
let mut no_stars = String::with_capacity(doc.len());
|
|
|
|
|
for line in doc.lines() {
|
|
|
|
|
let mut chars = line.chars();
|
|
|
|
|
while let Some(c) = chars.next() {
|
|
|
|
|
if c.is_whitespace() {
|
|
|
|
|
no_stars.push(c);
|
|
|
|
|
} else {
|
|
|
|
|
no_stars.push(if c == '*' { ' ' } else { c });
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
no_stars.push_str(chars.as_str());
|
|
|
|
|
no_stars.push('\n');
|
|
|
|
|
}
|
|
|
|
|
return (no_stars, sizes);
|
2016-07-08 16:18:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
panic!("not a doc-comment: {}", comment);
|
|
|
|
|
}
|
|
|
|
|
|
2019-12-06 00:00:23 +00:00
|
|
|
|
/// Which special headings were found in an item's doc comment; consumed by
/// `lint_for_missing_headers` to decide whether to emit the
/// `MISSING_SAFETY_DOC`/`MISSING_ERRORS_DOC` lints.
#[derive(Copy, Clone)]
struct DocHeaders {
    // A `# Safety` heading was seen.
    safety: bool,
    // An `# Errors` heading was seen.
    errors: bool,
}
|
|
|
|
|
|
|
|
|
|
/// Collects all sugared (`///`-style) doc comments from `attrs` into one
/// string and runs the markdown checks over it.
///
/// A raw `#[doc]` attribute mixed in makes the function bail out, reporting
/// both headers as present so no missing-header lint fires.
fn check_attrs<'a>(cx: &LateContext<'_, '_>, valid_idents: &FxHashSet<String>, attrs: &'a [Attribute]) -> DocHeaders {
    let mut doc = String::new();
    // One `(length, span)` entry per doc-comment line, used to map markdown
    // byte offsets back to source spans.
    let mut spans = vec![];

    for attr in attrs {
        if let AttrKind::DocComment(ref comment) = attr.kind {
            let comment = comment.to_string();
            let (comment, current_spans) = strip_doc_comment_decoration(&comment, attr.span);
            spans.extend_from_slice(&current_spans);
            doc.push_str(&comment);
        } else if attr.check_name(sym!(doc)) {
            // ignore mix of sugared and non-sugared doc
            // don't trigger the safety or errors check
            return DocHeaders {
                safety: true,
                errors: true,
            };
        }
    }

    // Turn the per-line lengths into cumulative start offsets into `doc`,
    // so `spans` can be binary-searched by offset in `check_doc`.
    let mut current = 0;
    for &mut (ref mut offset, _) in &mut spans {
        let offset_copy = *offset;
        *offset = current;
        current += offset_copy;
    }

    if doc.is_empty() {
        // Nothing to parse; no headers were (or could be) found.
        return DocHeaders {
            safety: false,
            errors: false,
        };
    }

    let parser = pulldown_cmark::Parser::new(&doc).into_offset_iter();
    // Iterate over all `Events` and combine consecutive events into one
    let events = parser.coalesce(|previous, current| {
        use pulldown_cmark::Event::*;

        let previous_range = previous.1;
        let current_range = current.1;

        match (previous.0, current.0) {
            // Merge adjacent text events, keeping the first event's range.
            (Text(previous), Text(current)) => {
                let mut previous = previous.to_string();
                previous.push_str(&current);
                Ok((Text(previous.into()), previous_range))
            },
            (previous, current) => Err(((previous, previous_range), (current, current_range))),
        }
    });
    check_doc(cx, valid_idents, events, &spans)
}
|
2016-05-26 20:53:38 +00:00
|
|
|
|
|
2019-04-24 05:47:01 +00:00
|
|
|
|
/// Walks the markdown `events` of one item's combined doc text, linting code
/// blocks (`check_code`) and plain text (`check_text`), and returns which
/// special headers (`# Safety`, `# Errors`) were encountered.
///
/// `spans` maps cumulative byte offsets in the doc text back to source spans
/// (as produced by `check_attrs`).
fn check_doc<'a, Events: Iterator<Item = (pulldown_cmark::Event<'a>, Range<usize>)>>(
    cx: &LateContext<'_, '_>,
    valid_idents: &FxHashSet<String>,
    events: Events,
    spans: &[(usize, Span)],
) -> DocHeaders {
    use pulldown_cmark::Event::*;
    use pulldown_cmark::Tag::*;

    // Accumulates which headers were found; starts with none seen.
    let mut headers = DocHeaders {
        safety: false,
        errors: false,
    };
    let mut in_code = false;
    let mut in_link = None;
    let mut in_heading = false;

    for (event, range) in events {
        match event {
            Start(CodeBlock(_)) => in_code = true,
            End(CodeBlock(_)) => in_code = false,
            Start(Link(_, url, _)) => in_link = Some(url),
            End(Link(..)) => in_link = None,
            Start(Heading(_)) => in_heading = true,
            End(Heading(_)) => in_heading = false,
            Start(_tag) | End(_tag) => (), // We don't care about other tags
            Html(_html) => (),             // HTML is weird, just ignore it
            SoftBreak | HardBreak | TaskListMarker(_) | Code(_) | Rule => (),
            FootnoteReference(text) | Text(text) => {
                if Some(&text) == in_link.as_ref() {
                    // Probably a link of the form `<http://example.com>`
                    // Which are represented as a link to "http://example.com" with
                    // text "http://example.com" by pulldown-cmark
                    continue;
                }
                // A heading whose text is exactly "Safety"/"Errors" marks
                // the corresponding section as present.
                headers.safety |= in_heading && text.trim() == "Safety";
                headers.errors |= in_heading && text.trim() == "Errors";
                // Find the doc-comment line this event's byte range starts in
                // (`Err(e)` means it starts inside the previous line).
                let index = match spans.binary_search_by(|c| c.0.cmp(&range.start)) {
                    Ok(o) => o,
                    Err(e) => e - 1,
                };
                let (begin, span) = spans[index];
                if in_code {
                    check_code(cx, &text, span);
                } else {
                    // Adjust for the beginning of the current `Event`
                    let span = span.with_lo(span.lo() + BytePos::from_usize(range.start - begin));

                    check_text(cx, valid_idents, &text, span);
                }
            },
        }
    }
    headers
}
|
2016-05-02 12:36:48 +00:00
|
|
|
|
|
2019-10-02 15:19:30 +00:00
|
|
|
|
fn check_code(cx: &LateContext<'_, '_>, text: &str, span: Span) {
|
2019-11-29 20:47:26 +00:00
|
|
|
|
if text.contains("fn main() {") && !(text.contains("static") || text.contains("fn main() {}")) {
|
2019-09-30 22:10:24 +00:00
|
|
|
|
span_lint(cx, NEEDLESS_DOCTEST_MAIN, span, "needless `fn main` in doctest");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-10-02 15:19:30 +00:00
|
|
|
|
fn check_text(cx: &LateContext<'_, '_>, valid_idents: &FxHashSet<String>, text: &str, span: Span) {
|
2018-12-07 10:48:06 +00:00
|
|
|
|
for word in text.split(|c: char| c.is_whitespace() || c == '\'') {
|
2017-05-28 22:12:43 +00:00
|
|
|
|
// Trim punctuation as in `some comment (see foo::bar).`
|
|
|
|
|
// ^^
|
|
|
|
|
// Or even as in `_foo bar_` which is emphasized.
|
|
|
|
|
let word = word.trim_matches(|c: char| !c.is_alphanumeric());
|
2016-05-05 19:42:59 +00:00
|
|
|
|
|
2018-12-29 17:07:10 +00:00
|
|
|
|
if valid_idents.contains(word) {
|
2017-05-28 22:12:43 +00:00
|
|
|
|
continue;
|
2016-03-19 16:59:12 +00:00
|
|
|
|
}
|
2016-05-26 20:53:38 +00:00
|
|
|
|
|
2017-05-29 22:11:08 +00:00
|
|
|
|
// Adjust for the current word
|
|
|
|
|
let offset = word.as_ptr() as usize - text.as_ptr() as usize;
|
2017-08-31 12:47:45 +00:00
|
|
|
|
let span = Span::new(
|
|
|
|
|
span.lo() + BytePos::from_usize(offset),
|
|
|
|
|
span.lo() + BytePos::from_usize(offset + word.len()),
|
|
|
|
|
span.ctxt(),
|
|
|
|
|
);
|
2017-05-29 22:11:08 +00:00
|
|
|
|
|
2017-05-28 22:12:43 +00:00
|
|
|
|
check_word(cx, word, span);
|
|
|
|
|
}
|
2016-03-19 16:59:12 +00:00
|
|
|
|
}
|
|
|
|
|
|
2019-10-02 15:19:30 +00:00
|
|
|
|
fn check_word(cx: &LateContext<'_, '_>, word: &str, span: Span) {
|
2019-01-31 01:15:29 +00:00
|
|
|
|
/// Checks if a string is camel-case, i.e., contains at least two uppercase
|
2019-03-10 17:19:47 +00:00
|
|
|
|
/// letters (`Clippy` is ok) and one lower-case letter (`NASA` is ok).
|
|
|
|
|
/// Plurals are also excluded (`IDs` is ok).
|
2016-03-19 16:59:12 +00:00
|
|
|
|
fn is_camel_case(s: &str) -> bool {
|
2016-04-04 18:18:17 +00:00
|
|
|
|
if s.starts_with(|c: char| c.is_digit(10)) {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
2018-11-27 20:14:15 +00:00
|
|
|
|
let s = if s.ends_with('s') { &s[..s.len() - 1] } else { s };
|
2016-03-19 16:59:12 +00:00
|
|
|
|
|
2018-11-27 20:14:15 +00:00
|
|
|
|
s.chars().all(char::is_alphanumeric)
|
|
|
|
|
&& s.chars().filter(|&c| c.is_uppercase()).take(2).count() > 1
|
2017-11-04 19:55:56 +00:00
|
|
|
|
&& s.chars().filter(|&c| c.is_lowercase()).take(1).count() > 0
|
2016-03-19 16:59:12 +00:00
|
|
|
|
}
|
|
|
|
|
|
2016-03-28 16:00:24 +00:00
|
|
|
|
fn has_underscore(s: &str) -> bool {
|
|
|
|
|
s != "_" && !s.contains("\\_") && s.contains('_')
|
|
|
|
|
}
|
|
|
|
|
|
2018-12-07 21:38:45 +00:00
|
|
|
|
fn has_hyphen(s: &str) -> bool {
|
|
|
|
|
s != "-" && s.contains('-')
|
|
|
|
|
}
|
|
|
|
|
|
2017-06-19 19:23:50 +00:00
|
|
|
|
if let Ok(url) = Url::parse(word) {
|
|
|
|
|
// try to get around the fact that `foo::bar` parses as a valid URL
|
|
|
|
|
if !url.cannot_be_a_base() {
|
2017-11-04 19:55:56 +00:00
|
|
|
|
span_lint(
|
|
|
|
|
cx,
|
|
|
|
|
DOC_MARKDOWN,
|
|
|
|
|
span,
|
|
|
|
|
"you should put bare URLs between `<`/`>` or make a proper Markdown link",
|
|
|
|
|
);
|
2017-06-19 19:23:50 +00:00
|
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-12-07 21:38:45 +00:00
|
|
|
|
// We assume that mixed-case words are not meant to be put inside bacticks. (Issue #2343)
|
|
|
|
|
if has_underscore(word) && has_hyphen(word) {
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2016-03-28 16:00:24 +00:00
|
|
|
|
if has_underscore(word) || word.contains("::") || is_camel_case(word) {
|
2017-08-09 07:30:56 +00:00
|
|
|
|
span_lint(
|
|
|
|
|
cx,
|
|
|
|
|
DOC_MARKDOWN,
|
|
|
|
|
span,
|
|
|
|
|
&format!("you should put `{}` between ticks in the documentation", word),
|
|
|
|
|
);
|
2016-03-19 16:59:12 +00:00
|
|
|
|
}
|
|
|
|
|
}
|