Fixed some spelling mistakes

This commit is contained in:
Armin Ronacher 2021-02-03 22:57:25 +01:00
parent 81ba51fc3c
commit 452c79bac5
6 changed files with 11 additions and 11 deletions

View file

@ -12,7 +12,7 @@
//! diff hook is not used outside of the raw algorithm implementations as for
//! most situations access to the values is useful or required.
//!
//! The algoriths module really is the most low-level module in similar and
//! The algorithms module really is the most low-level module in similar and
//! generally not the place to start.
//!
//! # Example
@ -30,7 +30,7 @@
//! let ops = d.into_inner().into_ops();
//! ```
//!
//! The above example is equivalen to using
//! The above example is equivalent to using
//! [`capture_diff_slices`](crate::capture_diff_slices).
mod capture;

View file

@ -82,10 +82,10 @@
//! by enabling the `bytes` feature it gains support for byte slices with some
//! caveats.
//!
//! A lot of text diff functionality assumes that what is being diffed constiutes
//! A lot of text diff functionality assumes that what is being diffed constitutes
//! text, but in the real world it can often be challenging to ensure that this is
//! all valid utf-8. Because of this the crate is built so that most functinality
//! also still works with bytes for as long as they are roughtly ASCII compatible.
//! all valid utf-8. Because of this the crate is built so that most functionality
//! also still works with bytes for as long as they are roughly ASCII compatible.
//!
//! This means you will be successful in creating a unified diff from latin1
//! encoded bytes but if you try to do the same with EBCDIC encoded bytes you

View file

@ -62,14 +62,14 @@ pub trait DiffableStr: Hash + PartialEq + PartialOrd + Ord + Eq + ToOwned {
/// Tokenizes into words.
fn tokenize_words(&self) -> Vec<&Self>;
/// Splits the input into characters.
/// Tokenizes the input into characters.
fn tokenize_chars(&self) -> Vec<&Self>;
/// Splits into unicode words.
/// Tokenizes into unicode words.
#[cfg(feature = "unicode")]
fn tokenize_unicode_words(&self) -> Vec<&Self>;
/// Splits into unicode graphemes..
/// Tokenizes into unicode graphemes.
#[cfg(feature = "unicode")]
fn tokenize_graphemes(&self) -> Vec<&Self>;

View file

@ -364,7 +364,7 @@ pub fn get_close_matches<'a, T: DiffableStr + ?Sized>(
let diff = TextDiff::from_slices(&seq1, &seq2);
let ratio = diff.ratio();
if ratio >= cutoff {
// we're putting the word iself in reverse in so that matches with
// we're putting the word itself in reverse in so that matches with
// the same ratio are ordered lexicographically.
matches.push(((ratio * u32::MAX as f32) as u32, Reverse(possibility)));
}

View file

@ -345,7 +345,7 @@ mod text_additions {
use crate::text::DiffableStr;
use std::borrow::Cow;
/// The text interface can produce changes over [`DiffableStr`] implemeting
/// The text interface can produce changes over [`DiffableStr`] implementing
/// values. As those are generic interfaces for different types of strings
/// utility methods to make working with standard rust strings more enjoyable.
impl<'s, T: DiffableStr + ?Sized> Change<'s, T> {

View file

@ -146,7 +146,7 @@ impl<'diff, 'old, 'new, 'bufs, T: DiffableStr + ?Sized> UnifiedDiff<'diff, 'old,
/// Sets a header to the diff.
///
/// `a` and `b` are the file names that are added to the top of the unified
/// file format. The names are accepted verbaitim which lets you encode
/// file format. The names are accepted verbatim which lets you encode
/// a timestamp into it when separated by a tab (`\t`). For more information
/// see [the unified diff format specification](https://pubs.opengroup.org/onlinepubs/9699919799/utilities/diff.html#tag_20_34_10_07)
pub fn header(&mut self, a: &str, b: &str) -> &mut Self {