1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 // ignore-lexer-test FIXME #15679
13 //! Unicode string manipulation (`str` type)
17 //! Rust's string type is one of the core primitive types of the language. While
18 //! represented by the name `str`, the name `str` is not actually a valid type in
19 //! Rust. Each string must also be decorated with a pointer. `String` is used
20 //! for an owned string, so there is only one commonly-used `str` type in Rust:
23 //! `&str` is the borrowed string type. This type of string can only be created
24 //! from other strings, unless it is a static string (see below). As the word
25 //! "borrowed" implies, this type of string is owned elsewhere, and this string
26 //! cannot be moved out of.
28 //! As an example, here's some code that uses a string.
32 //! let borrowed_string = "This string is borrowed with the 'static lifetime";
36 //! From the example above, you can guess that Rust's string literals have the
37 //! `'static` lifetime. This is akin to C's concept of a static string.
38 //! More precisely, string literals are immutable views with a 'static lifetime
39 //! (otherwise known as the lifetime of the entire program), and thus have the
40 //! type `&'static str`.
44 //! Rust's string type, `str`, is a sequence of Unicode scalar values encoded as a
45 //! stream of UTF-8 bytes. All [strings](../../reference.html#literals) are
46 //! guaranteed to be validly encoded UTF-8 sequences. Additionally, strings are
47 //! not null-terminated and can thus contain null bytes.
49 //! The actual representation of strings have direct mappings to slices: `&str`
50 //! is the same as `&[u8]`.
52 #![doc(primitive = "str")]
53 #![stable(feature = "rust1", since = "1.0.0")]
55 use self::RecompositionState::*;
56 use self::DecompositionType::*;
58 use core::borrow::{BorrowFrom, ToOwned};
59 use core::char::CharExt;
60 use core::clone::Clone;
61 use core::iter::AdditiveIterator;
62 use core::iter::{Iterator, IteratorExt};
64 use core::ops::RangeFull;
65 use core::option::Option::{self, Some, None};
66 use core::result::Result;
67 use core::slice::AsSlice;
68 use core::str as core_str;
69 use unicode::str::{UnicodeStr, Utf16Encoder};
71 use ring_buf::RingBuf;
76 use slice::SliceConcatExt;
78 pub use core::str::{FromStr, Utf8Error, Str};
79 pub use core::str::{Lines, LinesAny, MatchIndices, SplitStr, CharRange};
80 pub use core::str::{Split, SplitTerminator};
81 pub use core::str::{SplitN, RSplitN};
82 pub use core::str::{from_utf8, CharEq, Chars, CharIndices, Bytes};
83 pub use core::str::{from_utf8_unchecked, from_c_str, ParseBoolError};
84 pub use unicode::str::{Words, Graphemes, GraphemeIndices};
87 Section: Creating a string
90 impl<S: Str> SliceConcatExt<str, String> for [S] {
// Concatenates a slice of string-like values into one owned `String`.
91 fn concat(&self) -> String {
92 let s = self.as_slice();
// Pre-size the result so the `push_str` calls below do not reallocate.
98 // `len` calculation may overflow but push_str will check boundaries
99 let len = s.iter().map(|s| s.as_slice().len()).sum();
100 let mut result = String::with_capacity(len);
// Append each piece in order (loop header elided in this view).
103 result.push_str(s.as_slice())
// Joins the pieces with `sep` between consecutive elements.
109 fn connect(&self, sep: &str) -> String {
110 let s = self.as_slice();
// Empty input produces an empty string (guard condition elided here).
113 return String::new();
121 // this is wrong without the guarantee that `self` is non-empty
122 // `len` calculation may overflow but push_str will check boundaries
// Reserve space for all pieces plus (n - 1) separators up front.
123 let len = sep.len() * (s.len() - 1)
124 + s.iter().map(|s| s.as_slice().len()).sum();
125 let mut result = String::with_capacity(len);
// `first` suppresses the separator before the first element.
126 let mut first = true;
132 result.push_str(sep);
134 result.push_str(s.as_slice());
144 // Helper functions used for Unicode normalization
// Sorts a run of (char, combining_class) pairs into canonical order.
// Adjacent pairs are compared by combining class; only characters with a
// nonzero class are reordered, so starters (class 0) act as barriers —
// this matches the Unicode Canonical Ordering Algorithm. The visible
// structure is a bubble sort that repeats until a pass makes no swaps.
fn canonical_sort(comb: &mut [(char, u8)]) {
146 let len = comb.len();
148 let mut swapped = false;
150 let class_a = comb[j-1].1;
151 let class_b = comb[j].1;
// Out-of-order pair of non-starters: presumably swapped here (the swap
// itself is on lines elided from this view — TODO confirm).
152 if class_a != 0 && class_b != 0 && class_a > class_b {
// A full pass with no swaps means the slice is sorted.
157 if !swapped { break; }
// Selects which Unicode decomposition the `Decompositions` iterator
// performs (canonical NFD vs. compatibility NFKD, judging by the
// `decompose_canonical`/`decompose_compatible` call sites below);
// the variants themselves are not visible in this view.
162 enum DecompositionType {
167 /// External iterator for a string's decomposition's characters.
168 /// Use with the `std::iter` module.
170 #[unstable(feature = "collections")]
171 pub struct Decompositions<'a> {
// Which decomposition (canonical vs. compatibility) to apply.
172 kind: DecompositionType,
// Decomposed characters waiting to be yielded, each paired with its
// canonical combining class so runs of marks can be canonically sorted.
174 buffer: Vec<(char, u8)>,
178 #[stable(feature = "rust1", since = "1.0.0")]
179 impl<'a> Iterator for Decompositions<'a> {
// Yields one decomposed character at a time: drain already-buffered,
// already-sorted characters first, then decompose more input.
183 fn next(&mut self) -> Option<char> {
184 match self.buffer.first() {
187 self.buffer.remove(0);
// Buffer head is safe to emit once the pending run has been sorted.
190 Some(&(c, _)) if self.sorted => {
191 self.buffer.remove(0);
194 _ => self.sorted = false
// Pull more characters from the source and decompose them into the
// buffer until a safe emission point is reached.
198 for ch in self.iter.by_ref() {
199 let buffer = &mut self.buffer;
200 let sorted = &mut self.sorted;
// Each decomposed character `d` arrives with its combining class.
204 unicode::char::canonical_combining_class(d);
// A starter (class 0) terminates the current run of combining
// marks, so the buffered run can be canonically sorted now.
205 if class == 0 && !*sorted {
206 canonical_sort(buffer);
209 buffer.push((d, class));
// Dispatch on `self.kind`: canonical (NFD) vs. compatibility (NFKD).
213 unicode::char::decompose_canonical(ch, callback)
216 unicode::char::decompose_compatible(ch, callback)
// Input exhausted: sort whatever remains buffered before draining it.
227 canonical_sort(&mut self.buffer);
231 if self.buffer.is_empty() {
234 match self.buffer.remove(0) {
// Decomposition only ever expands, so the source length is a lower
// bound; no finite upper bound is reported.
244 fn size_hint(&self) -> (usize, Option<usize>) {
245 let (lower, _) = self.iter.size_hint();
// State machine driving the `Recompositions` iterator (the `Composing`,
// `Purging` and `Finished` states are referenced below; the variant
// list itself is not visible in this view).
251 enum RecompositionState {
257 /// External iterator for a string's recomposition's characters.
258 /// Use with the `std::iter` module.
260 #[unstable(feature = "collections")]
261 pub struct Recompositions<'a> {
// Source of canonically decomposed characters to be recombined.
262 iter: Decompositions<'a>,
// Current phase of the recomposition state machine.
263 state: RecompositionState,
// Combining characters that could not be composed and await emission.
264 buffer: RingBuf<char>,
// The starter currently being composed with following combining marks.
265 composee: Option<char>,
269 #[stable(feature = "rust1", since = "1.0.0")]
270 impl<'a> Iterator for Recompositions<'a> {
// Canonical composition pass (the second half of NFC/NFKC): tries to
// combine each incoming character with the pending starter `composee`,
// buffering combining marks that are blocked from composing.
274 fn next(&mut self) -> Option<char> {
278 for ch in self.iter.by_ref() {
279 let ch_class = unicode::char::canonical_combining_class(ch);
// No pending starter yet: adopt `ch` as the new composee.
280 if self.composee.is_none() {
284 self.composee = Some(ch);
287 let k = self.composee.clone().unwrap();
// `last_ccc` is the class of the previous combining mark; `None`
// means `ch` directly follows the starter.
289 match self.last_ccc {
291 match unicode::char::compose(k, ch) {
// Successful primary composition: fold `ch` into the starter.
293 self.composee = Some(r);
298 self.composee = Some(ch);
// Composition failed: keep `ch` for later emission.
301 self.buffer.push_back(ch);
302 self.last_ccc = Some(ch_class);
// A mark with class <= the previous mark's class is blocked from
// the starter (Unicode blocking rule), so flush what we have.
307 if l_class >= ch_class {
308 // `ch` is blocked from `composee`
310 self.composee = Some(ch);
311 self.last_ccc = None;
312 self.state = Purging;
315 self.buffer.push_back(ch);
316 self.last_ccc = Some(ch_class);
319 match unicode::char::compose(k, ch) {
321 self.composee = Some(r);
325 self.buffer.push_back(ch);
326 self.last_ccc = Some(ch_class);
// Source exhausted: emit the final pending starter, if any.
332 self.state = Finished;
333 if self.composee.is_some() {
334 return self.composee.take();
// Purging: drain buffered marks before resuming composition.
338 match self.buffer.pop_front() {
339 None => self.state = Composing,
// Finished: drain the buffer, then emit the leftover composee.
344 match self.buffer.pop_front() {
345 None => return self.composee.take(),
354 /// External iterator for a string's UTF16 codeunits.
355 /// Use with the `std::iter` module.
357 #[unstable(feature = "collections")]
358 pub struct Utf16Units<'a> {
// Wraps the string's `Chars` iterator and re-encodes each scalar
// value as one or two UTF-16 code units.
359 encoder: Utf16Encoder<Chars<'a>>
362 #[stable(feature = "rust1", since = "1.0.0")]
363 impl<'a> Iterator for Utf16Units<'a> {
// Pure delegation: the wrapped `Utf16Encoder` does all the work.
367 fn next(&mut self) -> Option<u16> { self.encoder.next() }
370 fn size_hint(&self) -> (usize, Option<usize>) { self.encoder.size_hint() }
377 // Return the initial codepoint accumulator for the first byte.
378 // The first byte is special, only want bottom 5 bits for width 2, 4 bits
379 // for width 3, and 3 bits for width 4
// `0x7F >> $width` builds the mask selecting those low bits.
380 macro_rules! utf8_first_byte {
381 ($byte:expr, $width:expr) => (($byte & (0x7F >> $width)) as u32)
384 // return the value of $ch updated with continuation byte $byte
// Shifts the accumulator left by 6 and ORs in the low 6 payload bits
// (mask 63 == 0x3F) of the UTF-8 continuation byte.
385 macro_rules! utf8_acc_cont_byte {
386 ($ch:expr, $byte:expr) => (($ch << 6) | ($byte & 63u8) as u32)
389 #[unstable(feature = "collections", reason = "trait is unstable")]
390 impl BorrowFrom<String> for str {
// Lets a `String` be borrowed as `&str`; `&owned[]` is the old
// full-range slicing syntax (today's `&owned[..]`).
391 fn borrow_from(owned: &String) -> &str { &owned[] }
394 #[unstable(feature = "collections", reason = "trait is unstable")]
395 impl ToOwned<String> for str {
396 fn to_owned(&self) -> String {
// Sound: the bytes of a `str` are guaranteed valid UTF-8, so the
// re-validation skipped by `from_utf8_unchecked` is unnecessary.
398 String::from_utf8_unchecked(self.as_bytes().to_owned())
408 Section: Trait implementations
411 /// Any string that can be represented as a slice.
412 #[stable(feature = "rust1", since = "1.0.0")]
413 pub trait StrExt: Index<RangeFull, Output = str> {
414 /// Escapes each char in `s` with `char::escape_default`.
415 #[unstable(feature = "collections",
416 reason = "return type may change to be an iterator")]
417 fn escape_default(&self) -> String {
418 self.chars().flat_map(|c| c.escape_default()).collect()
421 /// Escapes each char in `s` with `char::escape_unicode`.
422 #[unstable(feature = "collections",
423 reason = "return type may change to be an iterator")]
424 fn escape_unicode(&self) -> String {
425 self.chars().flat_map(|c| c.escape_unicode()).collect()
428 /// Replaces all occurrences of one string with another.
432 /// * `from` - The string to replace
433 /// * `to` - The replacement string
437 /// The original string with all occurrences of `from` replaced with `to`.
442 /// let s = "this is old";
444 /// assert_eq!(s.replace("old", "new"), "this is new");
446 /// // not found, so no change.
447 /// assert_eq!(s.replace("cookie monster", "little lamb"), s);
449 #[stable(feature = "rust1", since = "1.0.0")]
450 fn replace(&self, from: &str, to: &str) -> String {
451 let mut result = String::new();
// Byte index just past the previous match; the untouched text between
// matches is copied from here.
452 let mut last_end = 0;
453 for (start, end) in self.match_indices(from) {
// Sound: `match_indices` yields byte indices at char boundaries,
// so the unchecked slice cannot split a UTF-8 sequence.
454 result.push_str(unsafe { self.slice_unchecked(last_end, start) });
// Copy the tail after the final match.
458 result.push_str(unsafe { self.slice_unchecked(last_end, self.len()) });
462 /// Returns an iterator over the string in Unicode Normalization Form D
463 /// (canonical decomposition).
465 #[unstable(feature = "collections",
466 reason = "this functionality may be moved to libunicode")]
467 fn nfd_chars(&self) -> Decompositions {
469 iter: self[].chars(),
476 /// Returns an iterator over the string in Unicode Normalization Form KD
477 /// (compatibility decomposition).
479 #[unstable(feature = "collections",
480 reason = "this functionality may be moved to libunicode")]
481 fn nfkd_chars(&self) -> Decompositions {
483 iter: self[].chars(),
490 /// An Iterator over the string in Unicode Normalization Form C
491 /// (canonical decomposition followed by canonical composition).
493 #[unstable(feature = "collections",
494 reason = "this functionality may be moved to libunicode")]
495 fn nfc_chars(&self) -> Recompositions {
497 iter: self.nfd_chars(),
499 buffer: RingBuf::new(),
505 /// An Iterator over the string in Unicode Normalization Form KC
506 /// (compatibility decomposition followed by canonical composition).
508 #[unstable(feature = "collections",
509 reason = "this functionality may be moved to libunicode")]
510 fn nfkc_chars(&self) -> Recompositions {
512 iter: self.nfkd_chars(),
514 buffer: RingBuf::new(),
520 /// Returns true if a string contains a string pattern.
524 /// - pat - The string pattern to look for
529 /// assert!("bananas".contains("nana"));
531 #[stable(feature = "rust1", since = "1.0.0")]
532 fn contains(&self, pat: &str) -> bool {
533 core_str::StrExt::contains(&self[], pat)
536 /// Returns true if a string contains a char pattern.
540 /// - pat - The char pattern to look for
545 /// assert!("hello".contains_char('e'));
547 #[unstable(feature = "collections",
548 reason = "might get removed in favour of a more generic contains()")]
549 fn contains_char<P: CharEq>(&self, pat: P) -> bool {
550 core_str::StrExt::contains_char(&self[], pat)
553 /// An iterator over the characters of `self`. Note, this iterates
554 /// over Unicode code-points, not Unicode graphemes.
559 /// let v: Vec<char> = "abc åäö".chars().collect();
560 /// assert_eq!(v, vec!['a', 'b', 'c', ' ', 'å', 'ä', 'ö']);
562 #[stable(feature = "rust1", since = "1.0.0")]
563 fn chars(&self) -> Chars {
564 core_str::StrExt::chars(&self[])
567 /// An iterator over the bytes of `self`
572 /// let v: Vec<u8> = "bors".bytes().collect();
573 /// assert_eq!(v, b"bors".to_vec());
575 #[stable(feature = "rust1", since = "1.0.0")]
576 fn bytes(&self) -> Bytes {
577 core_str::StrExt::bytes(&self[])
580 /// An iterator over the characters of `self` and their byte offsets.
581 #[stable(feature = "rust1", since = "1.0.0")]
582 fn char_indices(&self) -> CharIndices {
583 core_str::StrExt::char_indices(&self[])
586 /// An iterator over substrings of `self`, separated by characters
587 /// matched by the pattern `pat`.
592 /// let v: Vec<&str> = "Mary had a little lamb".split(' ').collect();
593 /// assert_eq!(v, vec!["Mary", "had", "a", "little", "lamb"]);
595 /// let v: Vec<&str> = "abc1def2ghi".split(|c: char| c.is_numeric()).collect();
596 /// assert_eq!(v, vec!["abc", "def", "ghi"]);
598 /// let v: Vec<&str> = "lionXXtigerXleopard".split('X').collect();
599 /// assert_eq!(v, vec!["lion", "", "tiger", "leopard"]);
601 /// let v: Vec<&str> = "".split('X').collect();
602 /// assert_eq!(v, vec![""]);
604 #[stable(feature = "rust1", since = "1.0.0")]
605 fn split<P: CharEq>(&self, pat: P) -> Split<P> {
606 core_str::StrExt::split(&self[], pat)
609 /// An iterator over substrings of `self`, separated by characters
610 /// matched by the pattern `pat`, restricted to splitting at most `count`
616 /// let v: Vec<&str> = "Mary had a little lambda".splitn(2, ' ').collect();
617 /// assert_eq!(v, vec!["Mary", "had", "a little lambda"]);
619 /// let v: Vec<&str> = "abc1def2ghi".splitn(1, |c: char| c.is_numeric()).collect();
620 /// assert_eq!(v, vec!["abc", "def2ghi"]);
622 /// let v: Vec<&str> = "lionXXtigerXleopard".splitn(2, 'X').collect();
623 /// assert_eq!(v, vec!["lion", "", "tigerXleopard"]);
625 /// let v: Vec<&str> = "abcXdef".splitn(0, 'X').collect();
626 /// assert_eq!(v, vec!["abcXdef"]);
628 /// let v: Vec<&str> = "".splitn(1, 'X').collect();
629 /// assert_eq!(v, vec![""]);
631 #[stable(feature = "rust1", since = "1.0.0")]
632 fn splitn<P: CharEq>(&self, count: usize, pat: P) -> SplitN<P> {
633 core_str::StrExt::splitn(&self[], count, pat)
636 /// An iterator over substrings of `self`, separated by characters
637 /// matched by the pattern `pat`.
639 /// Equivalent to `split`, except that the trailing substring
640 /// is skipped if empty (terminator semantics).
645 /// let v: Vec<&str> = "A.B.".split_terminator('.').collect();
646 /// assert_eq!(v, vec!["A", "B"]);
648 /// let v: Vec<&str> = "A..B..".split_terminator('.').collect();
649 /// assert_eq!(v, vec!["A", "", "B", ""]);
651 /// let v: Vec<&str> = "Mary had a little lamb".split(' ').rev().collect();
652 /// assert_eq!(v, vec!["lamb", "little", "a", "had", "Mary"]);
654 /// let v: Vec<&str> = "abc1def2ghi".split(|c: char| c.is_numeric()).rev().collect();
655 /// assert_eq!(v, vec!["ghi", "def", "abc"]);
657 /// let v: Vec<&str> = "lionXXtigerXleopard".split('X').rev().collect();
658 /// assert_eq!(v, vec!["leopard", "tiger", "", "lion"]);
660 #[unstable(feature = "collections", reason = "might get removed")]
661 fn split_terminator<P: CharEq>(&self, pat: P) -> SplitTerminator<P> {
662 core_str::StrExt::split_terminator(&self[], pat)
665 /// An iterator over substrings of `self`, separated by characters
666 /// matched by the pattern `pat`, starting from the end of the string.
667 /// Restricted to splitting at most `count` times.
672 /// let v: Vec<&str> = "Mary had a little lamb".rsplitn(2, ' ').collect();
673 /// assert_eq!(v, vec!["lamb", "little", "Mary had a"]);
675 /// let v: Vec<&str> = "abc1def2ghi".rsplitn(1, |c: char| c.is_numeric()).collect();
676 /// assert_eq!(v, vec!["ghi", "abc1def"]);
678 /// let v: Vec<&str> = "lionXXtigerXleopard".rsplitn(2, 'X').collect();
679 /// assert_eq!(v, vec!["leopard", "tiger", "lionX"]);
681 #[stable(feature = "rust1", since = "1.0.0")]
682 fn rsplitn<P: CharEq>(&self, count: usize, pat: P) -> RSplitN<P> {
683 core_str::StrExt::rsplitn(&self[], count, pat)
686 /// An iterator over the start and end indices of the disjoint
687 /// matches of the pattern `pat` within `self`.
689 /// That is, each returned value `(start, end)` satisfies
690 /// `self.slice(start, end) == pat`. For matches of `pat` within
691 /// `self` that overlap, only the indices corresponding to the
692 /// first match are returned.
697 /// let v: Vec<(usize, usize)> = "abcXXXabcYYYabc".match_indices("abc").collect();
698 /// assert_eq!(v, vec![(0,3), (6,9), (12,15)]);
700 /// let v: Vec<(usize, usize)> = "1abcabc2".match_indices("abc").collect();
701 /// assert_eq!(v, vec![(1,4), (4,7)]);
703 /// let v: Vec<(usize, usize)> = "ababa".match_indices("aba").collect();
704 /// assert_eq!(v, vec![(0, 3)]); // only the first `aba`
706 #[unstable(feature = "collections",
707 reason = "might have its iterator type changed")]
708 fn match_indices<'a>(&'a self, pat: &'a str) -> MatchIndices<'a> {
709 core_str::StrExt::match_indices(&self[], pat)
712 /// An iterator over the substrings of `self` separated by the pattern `pat`.
717 /// let v: Vec<&str> = "abcXXXabcYYYabc".split_str("abc").collect();
718 /// assert_eq!(v, vec!["", "XXX", "YYY", ""]);
720 /// let v: Vec<&str> = "1abcabc2".split_str("abc").collect();
721 /// assert_eq!(v, vec!["1", "", "2"]);
723 #[unstable(feature = "collections",
724 reason = "might get removed in the future in favor of a more generic split()")]
725 fn split_str<'a>(&'a self, pat: &'a str) -> SplitStr<'a> {
726 core_str::StrExt::split_str(&self[], pat)
729 /// An iterator over the lines of a string (subsequences separated
730 /// by `\n`). This does not include the empty string after a
736 /// let four_lines = "foo\nbar\n\nbaz\n";
737 /// let v: Vec<&str> = four_lines.lines().collect();
738 /// assert_eq!(v, vec!["foo", "bar", "", "baz"]);
740 #[stable(feature = "rust1", since = "1.0.0")]
741 fn lines(&self) -> Lines {
742 core_str::StrExt::lines(&self[])
745 /// An iterator over the lines of a string, separated by either
746 /// `\n` or `\r\n`. As with `.lines()`, this does not include an
747 /// empty trailing line.
752 /// let four_lines = "foo\r\nbar\n\r\nbaz\n";
753 /// let v: Vec<&str> = four_lines.lines_any().collect();
754 /// assert_eq!(v, vec!["foo", "bar", "", "baz"]);
756 #[stable(feature = "rust1", since = "1.0.0")]
757 fn lines_any(&self) -> LinesAny {
758 core_str::StrExt::lines_any(&self[])
761 /// Deprecated: use `s[a .. b]` instead.
762 #[unstable(feature = "collections",
763 reason = "use slice notation [a..b] instead")]
764 #[deprecated(since = "1.0.0", reason = "use slice notation [a..b] instead")]
765 fn slice(&self, begin: usize, end: usize) -> &str;
767 /// Deprecated: use `s[a..]` instead.
768 #[unstable(feature = "collections",
769 reason = "use slice notation [a..b] instead")]
770 #[deprecated(since = "1.0.0", reason = "use slice notation [a..] instead")]
771 fn slice_from(&self, begin: usize) -> &str;
773 /// Deprecated: use `s[..a]` instead.
774 #[unstable(feature = "collections",
775 reason = "use slice notation [a..b] instead")]
776 #[deprecated(since = "1.0.0", reason = "use slice notation [..a] instead")]
777 fn slice_to(&self, end: usize) -> &str;
779 /// Returns a slice of the string from the character range
780 /// [`begin`..`end`).
782 /// That is, start at the `begin`-th code point of the string and
783 /// continue to the `end`-th code point. This does not detect or
784 /// handle edge cases such as leaving a combining character as the
785 /// first code point of the string.
787 /// Due to the design of UTF-8, this operation is `O(end)`.
788 /// See `slice`, `slice_to` and `slice_from` for `O(1)`
789 /// variants that use byte indices rather than code point
792 /// Panics if `begin` > `end`, or if either `begin` or `end` are
793 /// beyond the last character of the string.
798 /// let s = "Löwe 老虎 Léopard";
799 /// assert_eq!(s.slice_chars(0, 4), "Löwe");
800 /// assert_eq!(s.slice_chars(5, 7), "老虎");
802 #[unstable(feature = "collections",
803 reason = "may have yet to prove its worth")]
804 fn slice_chars(&self, begin: usize, end: usize) -> &str {
805 core_str::StrExt::slice_chars(&self[], begin, end)
808 /// Takes a bytewise (not UTF-8) slice from a string.
810 /// Returns the substring from [`begin`..`end`).
812 /// Caller must check both UTF-8 character boundaries and the boundaries of
813 /// the entire slice as well.
814 #[stable(feature = "rust1", since = "1.0.0")]
815 unsafe fn slice_unchecked(&self, begin: usize, end: usize) -> &str {
816 core_str::StrExt::slice_unchecked(&self[], begin, end)
819 /// Returns true if the pattern `pat` is a prefix of the string.
824 /// assert!("banana".starts_with("ba"));
826 #[stable(feature = "rust1", since = "1.0.0")]
827 fn starts_with(&self, pat: &str) -> bool {
828 core_str::StrExt::starts_with(&self[], pat)
831 /// Returns true if the pattern `pat` is a suffix of the string.
836 /// assert!("banana".ends_with("nana"));
838 #[stable(feature = "rust1", since = "1.0.0")]
839 fn ends_with(&self, pat: &str) -> bool {
840 core_str::StrExt::ends_with(&self[], pat)
843 /// Returns a string with all pre- and suffixes that match
844 /// the pattern `pat` repeatedly removed.
848 /// * pat - a string pattern
853 /// assert_eq!("11foo1bar11".trim_matches('1'), "foo1bar");
854 /// let x: &[_] = &['1', '2'];
855 /// assert_eq!("12foo1bar12".trim_matches(x), "foo1bar");
856 /// assert_eq!("123foo1bar123".trim_matches(|c: char| c.is_numeric()), "foo1bar");
858 #[stable(feature = "rust1", since = "1.0.0")]
859 fn trim_matches<P: CharEq>(&self, pat: P) -> &str {
860 core_str::StrExt::trim_matches(&self[], pat)
863 /// Returns a string with all prefixes that match
864 /// the pattern `pat` repeatedly removed.
868 /// * pat - a string pattern
873 /// assert_eq!("11foo1bar11".trim_left_matches('1'), "foo1bar11");
874 /// let x: &[_] = &['1', '2'];
875 /// assert_eq!("12foo1bar12".trim_left_matches(x), "foo1bar12");
876 /// assert_eq!("123foo1bar123".trim_left_matches(|c: char| c.is_numeric()), "foo1bar123");
878 #[stable(feature = "rust1", since = "1.0.0")]
879 fn trim_left_matches<P: CharEq>(&self, pat: P) -> &str {
880 core_str::StrExt::trim_left_matches(&self[], pat)
883 /// Returns a string with all suffixes that match
884 /// the pattern `pat` repeatedly removed.
888 /// * pat - a string pattern
893 /// assert_eq!("11foo1bar11".trim_right_matches('1'), "11foo1bar");
894 /// let x: &[_] = &['1', '2'];
895 /// assert_eq!("12foo1bar12".trim_right_matches(x), "12foo1bar");
896 /// assert_eq!("123foo1bar123".trim_right_matches(|c: char| c.is_numeric()), "123foo1bar");
898 #[stable(feature = "rust1", since = "1.0.0")]
899 fn trim_right_matches<P: CharEq>(&self, pat: P) -> &str {
900 core_str::StrExt::trim_right_matches(&self[], pat)
903 /// Check that `index`-th byte lies at the start and/or end of a
904 /// UTF-8 code point sequence.
906 /// The start and end of the string (when `index == self.len()`)
907 /// are considered to be boundaries.
909 /// Panics if `index` is greater than `self.len()`.
914 /// let s = "Löwe 老虎 Léopard";
915 /// assert!(s.is_char_boundary(0));
917 /// assert!(s.is_char_boundary(6));
918 /// assert!(s.is_char_boundary(s.len()));
920 /// // second byte of `ö`
921 /// assert!(!s.is_char_boundary(2));
923 /// // third byte of `老`
924 /// assert!(!s.is_char_boundary(8));
926 #[unstable(feature = "collections",
927 reason = "naming is uncertain with container conventions")]
928 fn is_char_boundary(&self, index: usize) -> bool {
929 core_str::StrExt::is_char_boundary(&self[], index)
932 /// Pluck a character out of a string and return the index of the next
935 /// This function can be used to iterate over the Unicode characters of a
940 /// This example manually iterates through the characters of a
941 /// string; this should normally be done by `.chars()` or
945 /// use std::str::CharRange;
947 /// let s = "中华Việt Nam";
949 /// while i < s.len() {
950 /// let CharRange {ch, next} = s.char_range_at(i);
951 /// println!("{}: {}", i, ch);
974 /// * i - The byte offset of the char to extract
978 /// A record {ch: char, next: usize} containing the char value and the byte
979 /// index of the next Unicode character.
983 /// If `i` is greater than or equal to the length of the string.
984 /// If `i` is not the index of the beginning of a valid UTF-8 character.
985 #[unstable(feature = "collections",
986 reason = "naming is uncertain with container conventions")]
987 fn char_range_at(&self, start: usize) -> CharRange {
988 core_str::StrExt::char_range_at(&self[], start)
991 /// Given a byte position and a str, return the previous char and its position.
993 /// This function can be used to iterate over a Unicode string in reverse.
995 /// Returns 0 for next index if called on start index 0.
999 /// If `i` is greater than the length of the string.
1000 /// If `i` is not an index following a valid UTF-8 character.
1001 #[unstable(feature = "collections",
1002 reason = "naming is uncertain with container conventions")]
1003 fn char_range_at_reverse(&self, start: usize) -> CharRange {
1004 core_str::StrExt::char_range_at_reverse(&self[], start)
1007 /// Plucks the character starting at the `i`th byte of a string.
1013 /// assert_eq!(s.char_at(1), 'b');
1014 /// assert_eq!(s.char_at(2), 'π');
1015 /// assert_eq!(s.char_at(4), 'c');
1020 /// If `i` is greater than or equal to the length of the string.
1021 /// If `i` is not the index of the beginning of a valid UTF-8 character.
1022 #[unstable(feature = "collections",
1023 reason = "naming is uncertain with container conventions")]
1024 fn char_at(&self, i: usize) -> char {
1025 core_str::StrExt::char_at(&self[], i)
1028 /// Plucks the character ending at the `i`th byte of a string.
1032 /// If `i` is greater than the length of the string.
1033 /// If `i` is not an index following a valid UTF-8 character.
1034 #[unstable(feature = "collections",
1035 reason = "naming is uncertain with container conventions")]
1036 fn char_at_reverse(&self, i: usize) -> char {
1037 core_str::StrExt::char_at_reverse(&self[], i)
1040 /// Work with the byte buffer of a string as a byte slice.
1045 /// assert_eq!("bors".as_bytes(), b"bors");
1047 #[stable(feature = "rust1", since = "1.0.0")]
1048 fn as_bytes(&self) -> &[u8] {
1049 core_str::StrExt::as_bytes(&self[])
1052 /// Returns the byte index of the first character of `self` that
1053 /// matches the pattern `pat`.
1057 /// `Some` containing the byte index of the first matching character
1058 /// or `None` if there is no match.
1063 /// let s = "Löwe 老虎 Léopard";
1065 /// assert_eq!(s.find('L'), Some(0));
1066 /// assert_eq!(s.find('é'), Some(14));
1068 /// // the first space
1069 /// assert_eq!(s.find(|c: char| c.is_whitespace()), Some(5));
1071 /// // neither are found
1072 /// let x: &[_] = &['1', '2'];
1073 /// assert_eq!(s.find(x), None);
1075 #[stable(feature = "rust1", since = "1.0.0")]
1076 fn find<P: CharEq>(&self, pat: P) -> Option<usize> {
1077 core_str::StrExt::find(&self[], pat)
1080 /// Returns the byte index of the last character of `self` that
1081 /// matches the pattern `pat`.
1085 /// `Some` containing the byte index of the last matching character
1086 /// or `None` if there is no match.
1091 /// let s = "Löwe 老虎 Léopard";
1093 /// assert_eq!(s.rfind('L'), Some(13));
1094 /// assert_eq!(s.rfind('é'), Some(14));
1096 /// // the second space
1097 /// assert_eq!(s.rfind(|c: char| c.is_whitespace()), Some(12));
1099 /// // searches for an occurrence of either `1` or `2`, but neither are found
1100 /// let x: &[_] = &['1', '2'];
1101 /// assert_eq!(s.rfind(x), None);
1103 #[stable(feature = "rust1", since = "1.0.0")]
1104 fn rfind<P: CharEq>(&self, pat: P) -> Option<usize> {
1105 core_str::StrExt::rfind(&self[], pat)
1108 /// Returns the byte index of the first matching substring
1112 /// * `needle` - The string to search for
1116 /// `Some` containing the byte index of the first matching substring
1117 /// or `None` if there is no match.
1122 /// let s = "Löwe 老虎 Léopard";
1124 /// assert_eq!(s.find_str("老虎 L"), Some(6));
1125 /// assert_eq!(s.find_str("muffin man"), None);
1127 #[unstable(feature = "collections",
1128 reason = "might get removed in favor of a more generic find in the future")]
1129 fn find_str(&self, needle: &str) -> Option<usize> {
1130 core_str::StrExt::find_str(&self[], needle)
1133 /// Retrieves the first character from a string slice and returns
1134 /// it. This does not allocate a new string; instead, it returns a
1135 /// slice that points one character beyond the character that was
1136 /// shifted. If the string does not contain any characters,
1137 /// None is returned instead.
1142 /// let s = "Löwe 老虎 Léopard";
1143 /// let (c, s1) = s.slice_shift_char().unwrap();
1144 /// assert_eq!(c, 'L');
1145 /// assert_eq!(s1, "öwe 老虎 Léopard");
1147 /// let (c, s2) = s1.slice_shift_char().unwrap();
1148 /// assert_eq!(c, 'ö');
1149 /// assert_eq!(s2, "we 老虎 Léopard");
1151 #[unstable(feature = "collections",
1152 reason = "awaiting conventions about shifting and slices")]
1153 fn slice_shift_char(&self) -> Option<(char, &str)> {
1154 core_str::StrExt::slice_shift_char(&self[])
1157 /// Returns the byte offset of an inner slice relative to an enclosing outer slice.
1159 /// Panics if `inner` is not a direct slice contained within self.
1164 /// let string = "a\nb\nc";
1165 /// let lines: Vec<&str> = string.lines().collect();
1167 /// assert!(string.subslice_offset(lines[0]) == 0); // &"a"
1168 /// assert!(string.subslice_offset(lines[1]) == 2); // &"b"
1169 /// assert!(string.subslice_offset(lines[2]) == 4); // &"c"
1171 #[unstable(feature = "collections",
1172 reason = "awaiting convention about comparability of arbitrary slices")]
1173 fn subslice_offset(&self, inner: &str) -> usize {
1174 core_str::StrExt::subslice_offset(&self[], inner)
1177 /// Return an unsafe pointer to the string's buffer.
1179 /// The caller must ensure that the string outlives this pointer,
1180 /// and that it is not reallocated (e.g. by pushing to the
1182 #[stable(feature = "rust1", since = "1.0.0")]
1184 fn as_ptr(&self) -> *const u8 {
1185 core_str::StrExt::as_ptr(&self[])
1188 /// Return an iterator of `u16` over the string encoded as UTF-16.
1189 #[unstable(feature = "collections",
1190 reason = "this functionality may only be provided by libunicode")]
1191 fn utf16_units(&self) -> Utf16Units {
1192 Utf16Units { encoder: Utf16Encoder::new(self[].chars()) }
1195 /// Return the number of bytes in this string
1200 /// assert_eq!("foo".len(), 3);
1201 /// assert_eq!("ƒoo".len(), 4);
1203 #[stable(feature = "rust1", since = "1.0.0")]
1205 fn len(&self) -> usize {
1206 core_str::StrExt::len(&self[])
1209 /// Returns true if this slice contains no bytes
1214 /// assert!("".is_empty());
1217 #[stable(feature = "rust1", since = "1.0.0")]
1218 fn is_empty(&self) -> bool {
1219 core_str::StrExt::is_empty(&self[])
1222 /// Parse this string into the specified type.
1227 /// assert_eq!("4".parse::<u32>(), Ok(4));
1228 /// assert!("j".parse::<u32>().is_err());
1231 #[stable(feature = "rust1", since = "1.0.0")]
1232 fn parse<F: FromStr>(&self) -> Result<F, F::Err> {
1233 core_str::StrExt::parse(&self[])
1236 /// Returns an iterator over the
1237 /// [grapheme clusters](http://www.unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries)
1240 /// If `is_extended` is true, the iterator is over the *extended grapheme clusters*;
1241 /// otherwise, the iterator is over the *legacy grapheme clusters*.
1242 /// [UAX#29](http://www.unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries)
1243 /// recommends extended grapheme cluster boundaries for general processing.
1248 /// let gr1 = "a\u{310}e\u{301}o\u{308}\u{332}".graphemes(true).collect::<Vec<&str>>();
1249 /// let b: &[_] = &["a\u{310}", "e\u{301}", "o\u{308}\u{332}"];
1250 /// assert_eq!(gr1.as_slice(), b);
1251 /// let gr2 = "a\r\nb🇷🇺🇸🇹".graphemes(true).collect::<Vec<&str>>();
1252 /// let b: &[_] = &["a", "\r\n", "b", "🇷🇺🇸🇹"];
1253 /// assert_eq!(gr2.as_slice(), b);
1255 #[unstable(feature = "collections",
1256 reason = "this functionality may only be provided by libunicode")]
// Iterator over grapheme clusters; `is_extended` selects extended vs. legacy
// cluster boundaries (see UAX #29 link in the docs above). Delegates to unicode.
1257 fn graphemes(&self, is_extended: bool) -> Graphemes {
1258 UnicodeStr::graphemes(&self[], is_extended)
1261 /// Returns an iterator over the grapheme clusters of self and their byte offsets.
1262 /// See `graphemes()` method for more information.
1267 /// let gr_inds = "a̐éö̲\r\n".grapheme_indices(true).collect::<Vec<(usize, &str)>>();
1268 /// let b: &[_] = &[(0, "a̐"), (3, "é"), (6, "ö̲"), (11, "\r\n")];
1269 /// assert_eq!(gr_inds.as_slice(), b);
1271 #[unstable(feature = "collections",
1272 reason = "this functionality may only be provided by libunicode")]
// Like graphemes(), but each cluster is paired with its byte offset.
1273 fn grapheme_indices(&self, is_extended: bool) -> GraphemeIndices {
1274 UnicodeStr::grapheme_indices(&self[], is_extended)
1277 /// An iterator over the words of a string (subsequences separated
1278 /// by any sequence of whitespace). Sequences of whitespace are
1279 /// collapsed, so empty "words" are not included.
1284 /// let some_words = " Mary had\ta little \n\t lamb";
1285 /// let v: Vec<&str> = some_words.words().collect();
1286 /// assert_eq!(v, vec!["Mary", "had", "a", "little", "lamb"]);
1288 #[unstable(feature = "str_words",
1289 reason = "the precise algorithm to use is unclear")]
// Whitespace-separated word iterator; runs of whitespace collapse, so no
// empty items are produced (see doc example above). Delegates to unicode.
1290 fn words(&self) -> Words {
1291 UnicodeStr::words(&self[])
1294 /// Returns a string's displayed width in columns, treating control
1295 /// characters as zero-width.
1297 /// `is_cjk` determines behavior for characters in the Ambiguous category:
1298 /// if `is_cjk` is `true`, these are 2 columns wide; otherwise, they are 1.
1299 /// In CJK locales, `is_cjk` should be `true`, else it should be `false`.
1300 /// [Unicode Standard Annex #11](http://www.unicode.org/reports/tr11/)
1301 /// recommends that these characters be treated as 1 column (i.e.,
1302 /// `is_cjk` = `false`) if the locale is unknown.
1303 #[unstable(feature = "collections",
1304 reason = "this functionality may only be provided by libunicode")]
// Displayed width in columns; `is_cjk` decides whether Ambiguous-category
// characters count as 2 columns (true) or 1 (false). Control chars are 0-wide.
1305 fn width(&self, is_cjk: bool) -> usize {
1306 UnicodeStr::width(&self[], is_cjk)
1309 /// Returns a string with leading and trailing whitespace removed.
1310 #[stable(feature = "rust1", since = "1.0.0")]
// Strip leading and trailing whitespace; returns a subslice, no allocation.
1311 fn trim(&self) -> &str {
1312 UnicodeStr::trim(&self[])
1315 /// Returns a string with leading whitespace removed.
1316 #[stable(feature = "rust1", since = "1.0.0")]
// Strip leading whitespace only.
1317 fn trim_left(&self) -> &str {
1318 UnicodeStr::trim_left(&self[])
1321 /// Returns a string with trailing whitespace removed.
1322 #[stable(feature = "rust1", since = "1.0.0")]
// Strip trailing whitespace only.
1323 fn trim_right(&self) -> &str {
1324 UnicodeStr::trim_right(&self[])
// Byte-indexed slicing methods for str.
// NOTE(review): the method bodies are elided in this excerpt (source line
// numbers jump 1330 -> 1334 -> 1338); only the signatures are visible here.
1328 #[stable(feature = "rust1", since = "1.0.0")]
1329 impl StrExt for str {
1330 fn slice(&self, begin: usize, end: usize) -> &str {
1334 fn slice_from(&self, begin: usize) -> &str {
1338 fn slice_to(&self, end: usize) -> &str {
1347 use core::iter::AdditiveIterator;
1348 use super::from_utf8;
1349 use super::Utf8Error;
1354 assert!("" <= "foo");
1355 assert!("foo" <= "foo");
1356 assert!("foo" != "bar");
1361 assert_eq!("".len(), 0);
1362 assert_eq!("hello world".len(), 11);
1363 assert_eq!("\x63".len(), 1);
1364 assert_eq!("\u{a2}".len(), 2);
1365 assert_eq!("\u{3c0}".len(), 2);
1366 assert_eq!("\u{2620}".len(), 3);
1367 assert_eq!("\u{1d11e}".len(), 4);
1369 assert_eq!("".chars().count(), 0);
1370 assert_eq!("hello world".chars().count(), 11);
1371 assert_eq!("\x63".chars().count(), 1);
1372 assert_eq!("\u{a2}".chars().count(), 1);
1373 assert_eq!("\u{3c0}".chars().count(), 1);
1374 assert_eq!("\u{2620}".chars().count(), 1);
1375 assert_eq!("\u{1d11e}".chars().count(), 1);
1376 assert_eq!("ประเทศไทย中华Việt Nam".chars().count(), 19);
1378 assert_eq!("hello".width(false), 10);
1379 assert_eq!("hello".width(true), 10);
1380 assert_eq!("\0\0\0\0\0".width(false), 0);
1381 assert_eq!("\0\0\0\0\0".width(true), 0);
1382 assert_eq!("".width(false), 0);
1383 assert_eq!("".width(true), 0);
1384 assert_eq!("\u{2081}\u{2082}\u{2083}\u{2084}".width(false), 4);
1385 assert_eq!("\u{2081}\u{2082}\u{2083}\u{2084}".width(true), 8);
1390 assert_eq!("hello".find('l'), Some(2));
1391 assert_eq!("hello".find(|c:char| c == 'o'), Some(4));
1392 assert!("hello".find('x').is_none());
1393 assert!("hello".find(|c:char| c == 'x').is_none());
1394 assert_eq!("ประเทศไทย中华Việt Nam".find('华'), Some(30));
1395 assert_eq!("ประเทศไทย中华Việt Nam".find(|c: char| c == '华'), Some(30));
1400 assert_eq!("hello".rfind('l'), Some(3));
1401 assert_eq!("hello".rfind(|c:char| c == 'o'), Some(4));
1402 assert!("hello".rfind('x').is_none());
1403 assert!("hello".rfind(|c:char| c == 'x').is_none());
1404 assert_eq!("ประเทศไทย中华Việt Nam".rfind('华'), Some(30));
1405 assert_eq!("ประเทศไทย中华Việt Nam".rfind(|c: char| c == '华'), Some(30));
1410 let empty = String::from_str("");
1411 let s: String = empty.chars().collect();
1412 assert_eq!(empty, s);
1413 let data = String::from_str("ประเทศไทย中");
1414 let s: String = data.chars().collect();
1415 assert_eq!(data, s);
1419 fn test_into_bytes() {
1420 let data = String::from_str("asdf");
1421 let buf = data.into_bytes();
1422 assert_eq!(b"asdf", buf);
1426 fn test_find_str() {
1428 assert_eq!("".find_str(""), Some(0));
1429 assert!("banana".find_str("apple pie").is_none());
1431 let data = "abcabc";
1432 assert_eq!(data[0..6].find_str("ab"), Some(0));
1433 assert_eq!(data[2..6].find_str("ab"), Some(3 - 2));
1434 assert!(data[2..4].find_str("ab").is_none());
1436 let string = "ประเทศไทย中华Việt Nam";
1437 let mut data = String::from_str(string);
1438 data.push_str(string);
1439 assert!(data.find_str("ไท华").is_none());
1440 assert_eq!(data[0..43].find_str(""), Some(0));
1441 assert_eq!(data[6..43].find_str(""), Some(6 - 6));
1443 assert_eq!(data[0..43].find_str("ประ"), Some( 0));
1444 assert_eq!(data[0..43].find_str("ทศไ"), Some(12));
1445 assert_eq!(data[0..43].find_str("ย中"), Some(24));
1446 assert_eq!(data[0..43].find_str("iệt"), Some(34));
1447 assert_eq!(data[0..43].find_str("Nam"), Some(40));
1449 assert_eq!(data[43..86].find_str("ประ"), Some(43 - 43));
1450 assert_eq!(data[43..86].find_str("ทศไ"), Some(55 - 43));
1451 assert_eq!(data[43..86].find_str("ย中"), Some(67 - 43));
1452 assert_eq!(data[43..86].find_str("iệt"), Some(77 - 43));
1453 assert_eq!(data[43..86].find_str("Nam"), Some(83 - 43));
1457 fn test_slice_chars() {
1458 fn t(a: &str, b: &str, start: usize) {
1459 assert_eq!(a.slice_chars(start, start + b.chars().count()), b);
1462 t("hello", "llo", 2);
1463 t("hello", "el", 1);
1466 assert_eq!("ะเทศไท", "ประเทศไทย中华Việt Nam".slice_chars(2, 8));
1469 fn s(x: &str) -> String { x.to_string() }
1471 macro_rules! test_concat {
1472 ($expected: expr, $string: expr) => {
1474 let s: String = $string.concat();
1475 assert_eq!($expected, s);
1481 fn test_concat_for_different_types() {
1482 test_concat!("ab", vec![s("a"), s("b")]);
1483 test_concat!("ab", vec!["a", "b"]);
1484 test_concat!("ab", vec!["a", "b"]);
1485 test_concat!("ab", vec![s("a"), s("b")]);
1489 fn test_concat_for_different_lengths() {
1490 let empty: &[&str] = &[];
1491 test_concat!("", empty);
1492 test_concat!("a", ["a"]);
1493 test_concat!("ab", ["a", "b"]);
1494 test_concat!("abc", ["", "a", "bc"]);
1497 macro_rules! test_connect {
1498 ($expected: expr, $string: expr, $delim: expr) => {
1500 let s = $string.connect($delim);
1501 assert_eq!($expected, s);
1507 fn test_connect_for_different_types() {
1508 test_connect!("a-b", ["a", "b"], "-");
1509 let hyphen = "-".to_string();
1510 test_connect!("a-b", [s("a"), s("b")], &*hyphen);
1511 test_connect!("a-b", vec!["a", "b"], &*hyphen);
1512 test_connect!("a-b", &*vec!["a", "b"], "-");
1513 test_connect!("a-b", vec![s("a"), s("b")], "-");
1517 fn test_connect_for_different_lengths() {
1518 let empty: &[&str] = &[];
1519 test_connect!("", empty, "-");
1520 test_connect!("a", ["a"], "-");
1521 test_connect!("a-b", ["a", "b"], "-");
1522 test_connect!("-a-bc", ["", "a", "bc"], "-");
1526 fn test_unsafe_slice() {
1527 assert_eq!("ab", unsafe {"abc".slice_unchecked(0, 2)});
1528 assert_eq!("bc", unsafe {"abc".slice_unchecked(1, 3)});
1529 assert_eq!("", unsafe {"abc".slice_unchecked(1, 1)});
1530 fn a_million_letter_a() -> String {
1532 let mut rs = String::new();
1534 rs.push_str("aaaaaaaaaa");
1539 fn half_a_million_letter_a() -> String {
1541 let mut rs = String::new();
1543 rs.push_str("aaaaa");
1548 let letters = a_million_letter_a();
1549 assert!(half_a_million_letter_a() ==
1550 unsafe {String::from_str(letters.slice_unchecked(
// starts_with: the empty pattern matches every string (including ""), and
// multi-byte prefixes ("öd") are compared by content, not byte count.
1556 fn test_starts_with() {
1557 assert!(("".starts_with("")));
1558 assert!(("abc".starts_with("")));
1559 assert!(("abc".starts_with("a")));
1560 assert!((!"a".starts_with("abc")));
1561 assert!((!"".starts_with("abc")));
1562 assert!((!"ödd".starts_with("-")));
1563 assert!(("ödd".starts_with("öd")));
// ends_with: mirror of the starts_with cases, including the multi-byte
// suffix "dö".
1567 fn test_ends_with() {
1568 assert!(("".ends_with("")));
1569 assert!(("abc".ends_with("")));
1570 assert!(("abc".ends_with("c")));
1571 assert!((!"a".ends_with("abc")));
1572 assert!((!"".ends_with("abc")));
1573 assert!((!"ddö".ends_with("-")));
1574 assert!(("ddö".ends_with("dö")));
// is_empty: true only for the zero-length string.
1578 fn test_is_empty() {
1579 assert!("".is_empty());
1580 assert!(!"a".is_empty());
1586 assert_eq!("".replace(a, "b"), String::from_str(""));
1587 assert_eq!("a".replace(a, "b"), String::from_str("b"));
1588 assert_eq!("ab".replace(a, "b"), String::from_str("bb"));
1590 assert!(" test test ".replace(test, "toast") ==
1591 String::from_str(" toast toast "));
1592 assert_eq!(" test test ".replace(test, ""), String::from_str(" "));
1596 fn test_replace_2a() {
1597 let data = "ประเทศไทย中华";
1598 let repl = "دولة الكويت";
1601 let a2 = "دولة الكويتทศไทย中华";
1602 assert_eq!(data.replace(a, repl), a2);
1606 fn test_replace_2b() {
1607 let data = "ประเทศไทย中华";
1608 let repl = "دولة الكويت";
1611 let b2 = "ปรدولة الكويتทศไทย中华";
1612 assert_eq!(data.replace(b, repl), b2);
1616 fn test_replace_2c() {
1617 let data = "ประเทศไทย中华";
1618 let repl = "دولة الكويت";
1621 let c2 = "ประเทศไทยدولة الكويت";
1622 assert_eq!(data.replace(c, repl), c2);
1626 fn test_replace_2d() {
1627 let data = "ประเทศไทย中华";
1628 let repl = "دولة الكويت";
1631 assert_eq!(data.replace(d, repl), data);
1636 assert_eq!("ab", "abc".slice(0, 2));
1637 assert_eq!("bc", "abc".slice(1, 3));
1638 assert_eq!("", "abc".slice(1, 1));
1639 assert_eq!("\u{65e5}", "\u{65e5}\u{672c}".slice(0, 3));
1641 let data = "ประเทศไทย中华";
1642 assert_eq!("ป", data.slice(0, 3));
1643 assert_eq!("ร", data.slice(3, 6));
1644 assert_eq!("", data.slice(3, 3));
1645 assert_eq!("华", data.slice(30, 33));
1647 fn a_million_letter_x() -> String {
1649 let mut rs = String::new();
1651 rs.push_str("华华华华华华华华华华");
1656 fn half_a_million_letter_x() -> String {
1658 let mut rs = String::new();
1660 rs.push_str("华华华华华");
1665 let letters = a_million_letter_x();
1666 assert!(half_a_million_letter_x() ==
1667 String::from_str(letters.slice(0, 3 * 500000)));
1672 let ss = "中华Việt Nam";
1674 assert_eq!("华", ss.slice(3, 6));
1675 assert_eq!("Việt Nam", ss.slice(6, 16));
1677 assert_eq!("ab", "abc".slice(0, 2));
1678 assert_eq!("bc", "abc".slice(1, 3));
1679 assert_eq!("", "abc".slice(1, 1));
1681 assert_eq!("中", ss.slice(0, 3));
1682 assert_eq!("华V", ss.slice(3, 7));
1683 assert_eq!("", ss.slice(3, 3));
1698 fn test_slice_fail() {
1699 "中华Việt Nam".slice(0, 2);
// slice_from(n): suffix starting at byte n; n == len() yields "".
1703 fn test_slice_from() {
1704 assert_eq!("abcd".slice_from(0), "abcd");
1705 assert_eq!("abcd".slice_from(2), "cd");
1706 assert_eq!("abcd".slice_from(4), "");
// slice_to(n): prefix of the first n bytes; n == 0 yields "".
1709 fn test_slice_to() {
1710 assert_eq!("abcd".slice_to(0), "");
1711 assert_eq!("abcd".slice_to(2), "ab");
1712 assert_eq!("abcd".slice_to(4), "abcd");
// trim_left_matches accepts three pattern forms exercised here:
// an empty char slice (no-op), a char slice, a single char, and a closure.
1716 fn test_trim_left_matches() {
1717 let v: &[char] = &[];
1718 assert_eq!(" *** foo *** ".trim_left_matches(v), " *** foo *** ");
1719 let chars: &[char] = &['*', ' '];
1720 assert_eq!(" *** foo *** ".trim_left_matches(chars), "foo *** ");
1721 assert_eq!(" *** *** ".trim_left_matches(chars), "");
1722 assert_eq!("foo *** ".trim_left_matches(chars), "foo *** ");
1724 assert_eq!("11foo1bar11".trim_left_matches('1'), "foo1bar11");
1725 let chars: &[char] = &['1', '2'];
1726 assert_eq!("12foo1bar12".trim_left_matches(chars), "foo1bar12");
1727 assert_eq!("123foo1bar123".trim_left_matches(|c: char| c.is_numeric()), "foo1bar123");
// trim_right_matches: same pattern forms as the left-hand test, trailing side.
1731 fn test_trim_right_matches() {
1732 let v: &[char] = &[];
1733 assert_eq!(" *** foo *** ".trim_right_matches(v), " *** foo *** ");
1734 let chars: &[char] = &['*', ' '];
1735 assert_eq!(" *** foo *** ".trim_right_matches(chars), " *** foo");
1736 assert_eq!(" *** *** ".trim_right_matches(chars), "");
1737 assert_eq!(" *** foo".trim_right_matches(chars), " *** foo");
1739 assert_eq!("11foo1bar11".trim_right_matches('1'), "11foo1bar");
1740 let chars: &[char] = &['1', '2'];
1741 assert_eq!("12foo1bar12".trim_right_matches(chars), "12foo1bar");
1742 assert_eq!("123foo1bar123".trim_right_matches(|c: char| c.is_numeric()), "123foo1bar");
// trim_matches: strips the pattern from both ends simultaneously.
1746 fn test_trim_matches() {
1747 let v: &[char] = &[];
1748 assert_eq!(" *** foo *** ".trim_matches(v), " *** foo *** ");
1749 let chars: &[char] = &['*', ' '];
1750 assert_eq!(" *** foo *** ".trim_matches(chars), "foo");
1751 assert_eq!(" *** *** ".trim_matches(chars), "");
1752 assert_eq!("foo".trim_matches(chars), "foo");
1754 assert_eq!("11foo1bar11".trim_matches('1'), "foo1bar");
1755 let chars: &[char] = &['1', '2'];
1756 assert_eq!("12foo1bar12".trim_matches(chars), "foo1bar");
1757 assert_eq!("123foo1bar123".trim_matches(|c: char| c.is_numeric()), "foo1bar");
// trim_left: leading whitespace only; U+3000 (ideographic space) is trimmed
// too, and trailing whitespace ("hey ") is preserved.
1761 fn test_trim_left() {
1762 assert_eq!("".trim_left(), "");
1763 assert_eq!("a".trim_left(), "a");
1764 assert_eq!(" ".trim_left(), "");
1765 assert_eq!(" blah".trim_left(), "blah");
1766 assert_eq!(" \u{3000} wut".trim_left(), "wut");
1767 assert_eq!("hey ".trim_left(), "hey ");
// trim_right: trailing whitespace only; leading whitespace (" hey") survives.
1771 fn test_trim_right() {
1772 assert_eq!("".trim_right(), "");
1773 assert_eq!("a".trim_right(), "a");
1774 assert_eq!(" ".trim_right(), "");
1775 assert_eq!("blah ".trim_right(), "blah");
1776 assert_eq!("wut \u{3000} ".trim_right(), "wut");
1777 assert_eq!(" hey".trim_right(), " hey");
1782 assert_eq!("".trim(), "");
1783 assert_eq!("a".trim(), "a");
1784 assert_eq!(" ".trim(), "");
1785 assert_eq!(" blah ".trim(), "blah");
1786 assert_eq!("\nwut \u{3000} ".trim(), "wut");
1787 assert_eq!(" hey dude ".trim(), "hey dude");
// char::is_whitespace over whole strings; the "" case passes vacuously
// because all() on an empty iterator is true.
1791 fn test_is_whitespace() {
1792 assert!("".chars().all(|c| c.is_whitespace()));
1793 assert!(" ".chars().all(|c| c.is_whitespace()));
1794 assert!("\u{2009}".chars().all(|c| c.is_whitespace())); // Thin space
1795 assert!(" \n\t ".chars().all(|c| c.is_whitespace()));
1796 assert!(!" _ ".chars().all(|c| c.is_whitespace()));
// slice_shift_char: splits off the first char, returning (char, rest).
1800 fn test_slice_shift_char() {
1801 let data = "ประเทศไทย中";
1802 assert_eq!(data.slice_shift_char(), Some(('ป', "ระเทศไทย中")));
// slice_shift_char on an empty string yields None.
// NOTE(review): the binding of `empty` (source line 1807) is elided in this
// excerpt; presumably it is the empty string — confirm against the full file.
1806 fn test_slice_shift_char_2() {
1808 assert_eq!(empty.slice_shift_char(), None);
1813 // deny overlong encodings
1814 assert!(from_utf8(&[0xc0, 0x80]).is_err());
1815 assert!(from_utf8(&[0xc0, 0xae]).is_err());
1816 assert!(from_utf8(&[0xe0, 0x80, 0x80]).is_err());
1817 assert!(from_utf8(&[0xe0, 0x80, 0xaf]).is_err());
1818 assert!(from_utf8(&[0xe0, 0x81, 0x81]).is_err());
1819 assert!(from_utf8(&[0xf0, 0x82, 0x82, 0xac]).is_err());
1820 assert!(from_utf8(&[0xf4, 0x90, 0x80, 0x80]).is_err());
1823 assert!(from_utf8(&[0xED, 0xA0, 0x80]).is_err());
1824 assert!(from_utf8(&[0xED, 0xBF, 0xBF]).is_err());
1826 assert!(from_utf8(&[0xC2, 0x80]).is_ok());
1827 assert!(from_utf8(&[0xDF, 0xBF]).is_ok());
1828 assert!(from_utf8(&[0xE0, 0xA0, 0x80]).is_ok());
1829 assert!(from_utf8(&[0xED, 0x9F, 0xBF]).is_ok());
1830 assert!(from_utf8(&[0xEE, 0x80, 0x80]).is_ok());
1831 assert!(from_utf8(&[0xEF, 0xBF, 0xBF]).is_ok());
1832 assert!(from_utf8(&[0xF0, 0x90, 0x80, 0x80]).is_ok());
1833 assert!(from_utf8(&[0xF4, 0x8F, 0xBF, 0xBF]).is_ok());
1837 fn test_is_utf16() {
1838 use unicode::str::is_utf16;
1840 ($($e:expr),*) => { { $(assert!(is_utf16($e));)* } }
1849 // surrogate pairs (randomly generated with Python 3's
1850 // .encode('utf-16be'))
1851 pos!(&[0xdb54, 0xdf16, 0xd880, 0xdee0, 0xdb6a, 0xdd45],
1852 &[0xd91f, 0xdeb1, 0xdb31, 0xdd84, 0xd8e2, 0xde14],
1853 &[0xdb9f, 0xdc26, 0xdb6f, 0xde58, 0xd850, 0xdfae]);
1855 // mixtures (also random)
1856 pos!(&[0xd921, 0xdcc2, 0x002d, 0x004d, 0xdb32, 0xdf65],
1857 &[0xdb45, 0xdd2d, 0x006a, 0xdacd, 0xddfe, 0x0006],
1858 &[0x0067, 0xd8ff, 0xddb7, 0x000f, 0xd900, 0xdc80]);
1862 ($($e:expr),*) => { { $(assert!(!is_utf16($e));)* } }
1866 // surrogate + regular unit
1868 // surrogate + lead surrogate
1870 // unterminated surrogate
1872 // trail surrogate without a lead
1875 // random byte sequences that Python 3's .decode('utf-16be')
1877 neg!(&[0x5b3d, 0x0141, 0xde9e, 0x8fdc, 0xc6e7],
1878 &[0xdf5a, 0x82a5, 0x62b9, 0xb447, 0x92f3],
1879 &[0xda4e, 0x42bc, 0x4462, 0xee98, 0xc2ca],
1880 &[0xbe00, 0xb04a, 0x6ecb, 0xdd89, 0xe278],
1881 &[0x0465, 0xab56, 0xdbb6, 0xa893, 0x665e],
1882 &[0x6b7f, 0x0a19, 0x40f4, 0xa657, 0xdcc5],
1883 &[0x9b50, 0xda5e, 0x24ec, 0x03ad, 0x6dee],
1884 &[0x8d17, 0xcaa7, 0xf4ae, 0xdf6e, 0xbed7],
1885 &[0xdaee, 0x2584, 0x7d30, 0xa626, 0x121a],
1886 &[0xd956, 0x4b43, 0x7570, 0xccd6, 0x4f4a],
1887 &[0x9dcf, 0x1b49, 0x4ba5, 0xfce9, 0xdffe],
1888 &[0x6572, 0xce53, 0xb05a, 0xf6af, 0xdacf],
1889 &[0x1b90, 0x728c, 0x9906, 0xdb68, 0xf46e],
1890 &[0x1606, 0xbeca, 0xbe76, 0x860f, 0xdfa5],
1891 &[0x8b4f, 0xde7a, 0xd220, 0x9fac, 0x2b6f],
1892 &[0xb8fe, 0xebbe, 0xda32, 0x1a5f, 0x8b8b],
1893 &[0x934b, 0x8956, 0xc434, 0x1881, 0xddf7],
1894 &[0x5a95, 0x13fc, 0xf116, 0xd89b, 0x93f9],
1895 &[0xd640, 0x71f1, 0xdd7d, 0x77eb, 0x1cd8],
1896 &[0x348b, 0xaef0, 0xdb2c, 0xebf1, 0x1282],
1897 &[0x50d7, 0xd824, 0x5010, 0xb369, 0x22ea]);
1901 fn test_as_bytes() {
1904 224, 184, 168, 224, 185, 132, 224, 184, 151, 224, 184, 162, 228,
1905 184, 173, 229, 141, 142, 86, 105, 225, 187, 135, 116, 32, 78, 97,
1909 assert_eq!("".as_bytes(), b);
1910 assert_eq!("abc".as_bytes(), b"abc");
1911 assert_eq!("ศไทย中华Việt Nam".as_bytes(), v);
1916 fn test_as_bytes_fail() {
1917 // Don't double free. (I'm not sure if this exercises the
1918 // original problem code path anymore.)
1919 let s = String::from_str("");
1920 let _bytes = s.as_bytes();
1926 let buf = "hello".as_ptr();
1928 assert_eq!(*buf.offset(0), b'h');
1929 assert_eq!(*buf.offset(1), b'e');
1930 assert_eq!(*buf.offset(2), b'l');
1931 assert_eq!(*buf.offset(3), b'l');
1932 assert_eq!(*buf.offset(4), b'o');
// subslice_offset: byte distance of a subslice from the start of its parent.
// The lines() iterator yields slices into the same buffer, so their offsets
// are meaningful here (0, 2, 4 for "a\nb\nc").
1937 fn test_subslice_offset() {
1938 let a = "kernelsprite";
1939 let b = &a[7..a.len()];
1940 let c = &a[0..a.len() - 6];
1941 assert_eq!(a.subslice_offset(b), 7);
1942 assert_eq!(a.subslice_offset(c), 0);
1944 let string = "a\nb\nc";
1945 let lines: Vec<&str> = string.lines().collect();
1946 assert_eq!(string.subslice_offset(lines[0]), 0);
1947 assert_eq!(string.subslice_offset(lines[1]), 2);
1948 assert_eq!(string.subslice_offset(lines[2]), 4);
// `b` is an unrelated string, not a subslice of `a`, so this call is expected
// to fail at runtime.
// NOTE(review): the panic-expectation attribute (source line ~1952) appears
// to be elided in this excerpt — confirm a #[should_panic]-style marker exists.
1953 fn test_subslice_offset_2() {
1954 let a = "alchemiter";
1955 let b = "cruxtruder";
1956 a.subslice_offset(b);
1960 fn vec_str_conversions() {
1961 let s1: String = String::from_str("All mimsy were the borogoves");
1963 let v: Vec<u8> = s1.as_bytes().to_vec();
1964 let s2: String = String::from_str(from_utf8(&v).unwrap());
1970 let a: u8 = s1.as_bytes()[i];
1971 let b: u8 = s2.as_bytes()[i];
// contains: empty needle matches everything (including ""); the final Thai/
// Chinese case checks that a needle spanning a non-existent boundary fails.
1980 fn test_contains() {
1981 assert!("abcde".contains("bcd"));
1982 assert!("abcde".contains("abcd"));
1983 assert!("abcde".contains("bcde"));
1984 assert!("abcde".contains(""));
1985 assert!("".contains(""));
1986 assert!(!"abcde".contains("def"));
1987 assert!(!"".contains("a"));
1989 let data = "ประเทศไทย中华Việt Nam";
1990 assert!(data.contains("ประเ"));
1991 assert!(data.contains("ะเ"));
1992 assert!(data.contains("中华"));
1993 assert!(!data.contains("ไท华"));
// contains_char: single-char membership, including the empty-string case.
1997 fn test_contains_char() {
1998 assert!("abc".contains_char('b'));
1999 assert!("a".contains_char('a'));
2000 assert!(!"abc".contains_char('d'));
2001 assert!(!"".contains_char('a'));
2006 let s = "ศไทย中华Việt Nam";
2007 let v = vec!['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m'];
2010 assert!(s.char_at(pos) == *ch);
2011 pos += ch.to_string().len();
2016 fn test_char_at_reverse() {
2017 let s = "ศไทย中华Việt Nam";
2018 let v = vec!['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m'];
2019 let mut pos = s.len();
2020 for ch in v.iter().rev() {
2021 assert!(s.char_at_reverse(pos) == *ch);
2022 pos -= ch.to_string().len();
2027 fn test_escape_unicode() {
2028 assert_eq!("abc".escape_unicode(),
2029 String::from_str("\\u{61}\\u{62}\\u{63}"));
2030 assert_eq!("a c".escape_unicode(),
2031 String::from_str("\\u{61}\\u{20}\\u{63}"));
2032 assert_eq!("\r\n\t".escape_unicode(),
2033 String::from_str("\\u{d}\\u{a}\\u{9}"));
2034 assert_eq!("'\"\\".escape_unicode(),
2035 String::from_str("\\u{27}\\u{22}\\u{5c}"));
2036 assert_eq!("\x00\x01\u{fe}\u{ff}".escape_unicode(),
2037 String::from_str("\\u{0}\\u{1}\\u{fe}\\u{ff}"));
2038 assert_eq!("\u{100}\u{ffff}".escape_unicode(),
2039 String::from_str("\\u{100}\\u{ffff}"));
2040 assert_eq!("\u{10000}\u{10ffff}".escape_unicode(),
2041 String::from_str("\\u{10000}\\u{10ffff}"));
2042 assert_eq!("ab\u{fb00}".escape_unicode(),
2043 String::from_str("\\u{61}\\u{62}\\u{fb00}"));
2044 assert_eq!("\u{1d4ea}\r".escape_unicode(),
2045 String::from_str("\\u{1d4ea}\\u{d}"));
2049 fn test_escape_default() {
2050 assert_eq!("abc".escape_default(), String::from_str("abc"));
2051 assert_eq!("a c".escape_default(), String::from_str("a c"));
2052 assert_eq!("\r\n\t".escape_default(), String::from_str("\\r\\n\\t"));
2053 assert_eq!("'\"\\".escape_default(), String::from_str("\\'\\\"\\\\"));
2054 assert_eq!("\u{100}\u{ffff}".escape_default(),
2055 String::from_str("\\u{100}\\u{ffff}"));
2056 assert_eq!("\u{10000}\u{10ffff}".escape_default(),
2057 String::from_str("\\u{10000}\\u{10ffff}"));
2058 assert_eq!("ab\u{fb00}".escape_default(),
2059 String::from_str("ab\\u{fb00}"));
2060 assert_eq!("\u{1d4ea}\r".escape_default(),
2061 String::from_str("\\u{1d4ea}\\r"));
// NOTE(review): each comparison below computes a boolean and immediately
// discards it — there is no assert!, so this test can never fail regardless
// of what cmp returns. These lines almost certainly should read
// assert!("1234".cmp("123") == Greater) etc.; flagged rather than changed
// because this excerpt is fragmentary.
2065 fn test_total_ord() {
2066 "1234".cmp("123") == Greater;
2067 "123".cmp("1234") == Less;
2068 "1234".cmp("1234") == Equal;
2069 "12345555".cmp("123456") == Less;
2070 "22".cmp("1234") == Greater;
// char_range_at: decode the char starting at a byte offset. The offsets
// (0,1,3,6,10,14,17,19) follow the UTF-8 widths of b(1) ¢(2) €(3) 𤭢(4)
// through the palindromic test string.
2074 fn test_char_range_at() {
2075 let data = "b¢€𤭢𤭢€¢b";
2076 assert_eq!('b', data.char_range_at(0).ch);
2077 assert_eq!('¢', data.char_range_at(1).ch);
2078 assert_eq!('€', data.char_range_at(3).ch);
2079 assert_eq!('𤭢', data.char_range_at(6).ch);
2080 assert_eq!('𤭢', data.char_range_at(10).ch);
2081 assert_eq!('€', data.char_range_at(14).ch);
2082 assert_eq!('¢', data.char_range_at(17).ch);
2083 assert_eq!('b', data.char_range_at(19).ch);
// At offset 0 there is no preceding char; `.next` must stay 0 rather than
// wrapping below zero (regression guard for index underflow).
2087 fn test_char_range_at_reverse_underflow() {
2088 assert_eq!("abc".char_range_at_reverse(0).next, 0);
2092 fn test_iterator() {
2093 let s = "ศไทย中华Việt Nam";
2094 let v = ['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m'];
2100 assert_eq!(c, v[pos]);
2103 assert_eq!(pos, v.len());
2107 fn test_rev_iterator() {
2108 let s = "ศไทย中华Việt Nam";
2109 let v = ['m', 'a', 'N', ' ', 't', 'ệ','i','V','华','中','ย','ท','ไ','ศ'];
2112 let it = s.chars().rev();
2115 assert_eq!(c, v[pos]);
2118 assert_eq!(pos, v.len());
2122 fn test_chars_decoding() {
2123 let mut bytes = [0u8; 4];
2124 for c in (0u32..0x110000).filter_map(|c| ::core::char::from_u32(c)) {
2125 let len = c.encode_utf8(&mut bytes).unwrap_or(0);
2126 let s = ::core::str::from_utf8(&bytes[..len]).unwrap();
2127 if Some(c) != s.chars().next() {
2128 panic!("character {:x}={} does not decode correctly", c as u32, c);
2134 fn test_chars_rev_decoding() {
2135 let mut bytes = [0u8; 4];
2136 for c in (0u32..0x110000).filter_map(|c| ::core::char::from_u32(c)) {
2137 let len = c.encode_utf8(&mut bytes).unwrap_or(0);
2138 let s = ::core::str::from_utf8(&bytes[..len]).unwrap();
2139 if Some(c) != s.chars().rev().next() {
2140 panic!("character {:x}={} does not decode correctly", c as u32, c);
2146 fn test_iterator_clone() {
2147 let s = "ศไทย中华Việt Nam";
2148 let mut it = s.chars();
2150 assert!(it.clone().zip(it).all(|(x,y)| x == y));
2154 fn test_bytesator() {
2155 let s = "ศไทย中华Việt Nam";
2157 224, 184, 168, 224, 185, 132, 224, 184, 151, 224, 184, 162, 228,
2158 184, 173, 229, 141, 142, 86, 105, 225, 187, 135, 116, 32, 78, 97,
2163 for b in s.bytes() {
2164 assert_eq!(b, v[pos]);
2170 fn test_bytes_revator() {
2171 let s = "ศไทย中华Việt Nam";
2173 224, 184, 168, 224, 185, 132, 224, 184, 151, 224, 184, 162, 228,
2174 184, 173, 229, 141, 142, 86, 105, 225, 187, 135, 116, 32, 78, 97,
2177 let mut pos = v.len();
2179 for b in s.bytes().rev() {
2181 assert_eq!(b, v[pos]);
2186 fn test_char_indicesator() {
2187 let s = "ศไทย中华Việt Nam";
2188 let p = [0, 3, 6, 9, 12, 15, 18, 19, 20, 23, 24, 25, 26, 27];
2189 let v = ['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m'];
2192 let it = s.char_indices();
2195 assert_eq!(c, (p[pos], v[pos]));
2198 assert_eq!(pos, v.len());
2199 assert_eq!(pos, p.len());
2203 fn test_char_indices_revator() {
2204 let s = "ศไทย中华Việt Nam";
2205 let p = [27, 26, 25, 24, 23, 20, 19, 18, 15, 12, 9, 6, 3, 0];
2206 let v = ['m', 'a', 'N', ' ', 't', 'ệ','i','V','华','中','ย','ท','ไ','ศ'];
2209 let it = s.char_indices().rev();
2212 assert_eq!(c, (p[pos], v[pos]));
2215 assert_eq!(pos, v.len());
2216 assert_eq!(pos, p.len());
2220 fn test_splitn_char_iterator() {
2221 let data = "\nMäry häd ä little lämb\nLittle lämb\n";
2223 let split: Vec<&str> = data.splitn(3, ' ').collect();
2224 assert_eq!(split, vec!["\nMäry", "häd", "ä", "little lämb\nLittle lämb\n"]);
2226 let split: Vec<&str> = data.splitn(3, |c: char| c == ' ').collect();
2227 assert_eq!(split, vec!["\nMäry", "häd", "ä", "little lämb\nLittle lämb\n"]);
2230 let split: Vec<&str> = data.splitn(3, 'ä').collect();
2231 assert_eq!(split, vec!["\nM", "ry h", "d ", " little lämb\nLittle lämb\n"]);
2233 let split: Vec<&str> = data.splitn(3, |c: char| c == 'ä').collect();
2234 assert_eq!(split, vec!["\nM", "ry h", "d ", " little lämb\nLittle lämb\n"]);
2238 fn test_split_char_iterator_no_trailing() {
2239 let data = "\nMäry häd ä little lämb\nLittle lämb\n";
2241 let split: Vec<&str> = data.split('\n').collect();
2242 assert_eq!(split, vec!["", "Märy häd ä little lämb", "Little lämb", ""]);
2244 let split: Vec<&str> = data.split_terminator('\n').collect();
2245 assert_eq!(split, vec!["", "Märy häd ä little lämb", "Little lämb"]);
2250 let data = "\n \tMäry häd\tä little lämb\nLittle lämb\n";
2251 let words: Vec<&str> = data.words().collect();
2252 assert_eq!(words, vec!["Märy", "häd", "ä", "little", "lämb", "Little", "lämb"])
2256 fn test_nfd_chars() {
2258 ($input: expr, $expected: expr) => {
2259 assert_eq!($input.nfd_chars().collect::<String>(), $expected);
2263 t!("\u{1e0b}\u{1c4}", "d\u{307}\u{1c4}");
2264 t!("\u{2026}", "\u{2026}");
2265 t!("\u{2126}", "\u{3a9}");
2266 t!("\u{1e0b}\u{323}", "d\u{323}\u{307}");
2267 t!("\u{1e0d}\u{307}", "d\u{323}\u{307}");
2268 t!("a\u{301}", "a\u{301}");
2269 t!("\u{301}a", "\u{301}a");
2270 t!("\u{d4db}", "\u{1111}\u{1171}\u{11b6}");
2271 t!("\u{ac1c}", "\u{1100}\u{1162}");
2275 fn test_nfkd_chars() {
2277 ($input: expr, $expected: expr) => {
2278 assert_eq!($input.nfkd_chars().collect::<String>(), $expected);
2282 t!("\u{1e0b}\u{1c4}", "d\u{307}DZ\u{30c}");
2283 t!("\u{2026}", "...");
2284 t!("\u{2126}", "\u{3a9}");
2285 t!("\u{1e0b}\u{323}", "d\u{323}\u{307}");
2286 t!("\u{1e0d}\u{307}", "d\u{323}\u{307}");
2287 t!("a\u{301}", "a\u{301}");
2288 t!("\u{301}a", "\u{301}a");
2289 t!("\u{d4db}", "\u{1111}\u{1171}\u{11b6}");
2290 t!("\u{ac1c}", "\u{1100}\u{1162}");
2294 fn test_nfc_chars() {
2296 ($input: expr, $expected: expr) => {
2297 assert_eq!($input.nfc_chars().collect::<String>(), $expected);
2301 t!("\u{1e0b}\u{1c4}", "\u{1e0b}\u{1c4}");
2302 t!("\u{2026}", "\u{2026}");
2303 t!("\u{2126}", "\u{3a9}");
2304 t!("\u{1e0b}\u{323}", "\u{1e0d}\u{307}");
2305 t!("\u{1e0d}\u{307}", "\u{1e0d}\u{307}");
2306 t!("a\u{301}", "\u{e1}");
2307 t!("\u{301}a", "\u{301}a");
2308 t!("\u{d4db}", "\u{d4db}");
2309 t!("\u{ac1c}", "\u{ac1c}");
2310 t!("a\u{300}\u{305}\u{315}\u{5ae}b", "\u{e0}\u{5ae}\u{305}\u{315}b");
2314 fn test_nfkc_chars() {
2316 ($input: expr, $expected: expr) => {
2317 assert_eq!($input.nfkc_chars().collect::<String>(), $expected);
2321 t!("\u{1e0b}\u{1c4}", "\u{1e0b}D\u{17d}");
2322 t!("\u{2026}", "...");
2323 t!("\u{2126}", "\u{3a9}");
2324 t!("\u{1e0b}\u{323}", "\u{1e0d}\u{307}");
2325 t!("\u{1e0d}\u{307}", "\u{1e0d}\u{307}");
2326 t!("a\u{301}", "\u{e1}");
2327 t!("\u{301}a", "\u{301}a");
2328 t!("\u{d4db}", "\u{d4db}");
2329 t!("\u{ac1c}", "\u{ac1c}");
2330 t!("a\u{300}\u{305}\u{315}\u{5ae}b", "\u{e0}\u{5ae}\u{305}\u{315}b");
2335 let data = "\nMäry häd ä little lämb\n\nLittle lämb\n";
2336 let lines: Vec<&str> = data.lines().collect();
2337 assert_eq!(lines, vec!["", "Märy häd ä little lämb", "", "Little lämb"]);
2339 let data = "\nMäry häd ä little lämb\n\nLittle lämb"; // no trailing \n
2340 let lines: Vec<&str> = data.lines().collect();
2341 assert_eq!(lines, vec!["", "Märy häd ä little lämb", "", "Little lämb"]);
2345 fn test_graphemes() {
2346 use core::iter::order;
2347 // official Unicode test data
2348 // from http://www.unicode.org/Public/UCD/latest/ucd/auxiliary/GraphemeBreakTest.txt
2349 let test_same: [(_, &[_]); 325] = [
2350 ("\u{20}\u{20}", &["\u{20}", "\u{20}"]),
2351 ("\u{20}\u{308}\u{20}", &["\u{20}\u{308}", "\u{20}"]),
2352 ("\u{20}\u{D}", &["\u{20}", "\u{D}"]),
2353 ("\u{20}\u{308}\u{D}", &["\u{20}\u{308}", "\u{D}"]),
2354 ("\u{20}\u{A}", &["\u{20}", "\u{A}"]),
2355 ("\u{20}\u{308}\u{A}", &["\u{20}\u{308}", "\u{A}"]),
2356 ("\u{20}\u{1}", &["\u{20}", "\u{1}"]),
2357 ("\u{20}\u{308}\u{1}", &["\u{20}\u{308}", "\u{1}"]),
2358 ("\u{20}\u{300}", &["\u{20}\u{300}"]),
2359 ("\u{20}\u{308}\u{300}", &["\u{20}\u{308}\u{300}"]),
2360 ("\u{20}\u{1100}", &["\u{20}", "\u{1100}"]),
2361 ("\u{20}\u{308}\u{1100}", &["\u{20}\u{308}", "\u{1100}"]),
2362 ("\u{20}\u{1160}", &["\u{20}", "\u{1160}"]),
2363 ("\u{20}\u{308}\u{1160}", &["\u{20}\u{308}", "\u{1160}"]),
2364 ("\u{20}\u{11A8}", &["\u{20}", "\u{11A8}"]),
2365 ("\u{20}\u{308}\u{11A8}", &["\u{20}\u{308}", "\u{11A8}"]),
2366 ("\u{20}\u{AC00}", &["\u{20}", "\u{AC00}"]),
2367 ("\u{20}\u{308}\u{AC00}", &["\u{20}\u{308}", "\u{AC00}"]),
2368 ("\u{20}\u{AC01}", &["\u{20}", "\u{AC01}"]),
2369 ("\u{20}\u{308}\u{AC01}", &["\u{20}\u{308}", "\u{AC01}"]),
2370 ("\u{20}\u{1F1E6}", &["\u{20}", "\u{1F1E6}"]),
2371 ("\u{20}\u{308}\u{1F1E6}", &["\u{20}\u{308}", "\u{1F1E6}"]),
2372 ("\u{20}\u{378}", &["\u{20}", "\u{378}"]),
2373 ("\u{20}\u{308}\u{378}", &["\u{20}\u{308}", "\u{378}"]),
2374 ("\u{D}\u{20}", &["\u{D}", "\u{20}"]),
2375 ("\u{D}\u{308}\u{20}", &["\u{D}", "\u{308}", "\u{20}"]),
2376 ("\u{D}\u{D}", &["\u{D}", "\u{D}"]),
2377 ("\u{D}\u{308}\u{D}", &["\u{D}", "\u{308}", "\u{D}"]),
2378 ("\u{D}\u{A}", &["\u{D}\u{A}"]),
2379 ("\u{D}\u{308}\u{A}", &["\u{D}", "\u{308}", "\u{A}"]),
2380 ("\u{D}\u{1}", &["\u{D}", "\u{1}"]),
2381 ("\u{D}\u{308}\u{1}", &["\u{D}", "\u{308}", "\u{1}"]),
2382 ("\u{D}\u{300}", &["\u{D}", "\u{300}"]),
2383 ("\u{D}\u{308}\u{300}", &["\u{D}", "\u{308}\u{300}"]),
2384 ("\u{D}\u{903}", &["\u{D}", "\u{903}"]),
2385 ("\u{D}\u{1100}", &["\u{D}", "\u{1100}"]),
2386 ("\u{D}\u{308}\u{1100}", &["\u{D}", "\u{308}", "\u{1100}"]),
2387 ("\u{D}\u{1160}", &["\u{D}", "\u{1160}"]),
2388 ("\u{D}\u{308}\u{1160}", &["\u{D}", "\u{308}", "\u{1160}"]),
2389 ("\u{D}\u{11A8}", &["\u{D}", "\u{11A8}"]),
2390 ("\u{D}\u{308}\u{11A8}", &["\u{D}", "\u{308}", "\u{11A8}"]),
2391 ("\u{D}\u{AC00}", &["\u{D}", "\u{AC00}"]),
2392 ("\u{D}\u{308}\u{AC00}", &["\u{D}", "\u{308}", "\u{AC00}"]),
2393 ("\u{D}\u{AC01}", &["\u{D}", "\u{AC01}"]),
2394 ("\u{D}\u{308}\u{AC01}", &["\u{D}", "\u{308}", "\u{AC01}"]),
2395 ("\u{D}\u{1F1E6}", &["\u{D}", "\u{1F1E6}"]),
2396 ("\u{D}\u{308}\u{1F1E6}", &["\u{D}", "\u{308}", "\u{1F1E6}"]),
2397 ("\u{D}\u{378}", &["\u{D}", "\u{378}"]),
2398 ("\u{D}\u{308}\u{378}", &["\u{D}", "\u{308}", "\u{378}"]),
2399 ("\u{A}\u{20}", &["\u{A}", "\u{20}"]),
2400 ("\u{A}\u{308}\u{20}", &["\u{A}", "\u{308}", "\u{20}"]),
2401 ("\u{A}\u{D}", &["\u{A}", "\u{D}"]),
2402 ("\u{A}\u{308}\u{D}", &["\u{A}", "\u{308}", "\u{D}"]),
2403 ("\u{A}\u{A}", &["\u{A}", "\u{A}"]),
2404 ("\u{A}\u{308}\u{A}", &["\u{A}", "\u{308}", "\u{A}"]),
2405 ("\u{A}\u{1}", &["\u{A}", "\u{1}"]),
2406 ("\u{A}\u{308}\u{1}", &["\u{A}", "\u{308}", "\u{1}"]),
2407 ("\u{A}\u{300}", &["\u{A}", "\u{300}"]),
2408 ("\u{A}\u{308}\u{300}", &["\u{A}", "\u{308}\u{300}"]),
2409 ("\u{A}\u{903}", &["\u{A}", "\u{903}"]),
2410 ("\u{A}\u{1100}", &["\u{A}", "\u{1100}"]),
2411 ("\u{A}\u{308}\u{1100}", &["\u{A}", "\u{308}", "\u{1100}"]),
2412 ("\u{A}\u{1160}", &["\u{A}", "\u{1160}"]),
2413 ("\u{A}\u{308}\u{1160}", &["\u{A}", "\u{308}", "\u{1160}"]),
2414 ("\u{A}\u{11A8}", &["\u{A}", "\u{11A8}"]),
2415 ("\u{A}\u{308}\u{11A8}", &["\u{A}", "\u{308}", "\u{11A8}"]),
2416 ("\u{A}\u{AC00}", &["\u{A}", "\u{AC00}"]),
2417 ("\u{A}\u{308}\u{AC00}", &["\u{A}", "\u{308}", "\u{AC00}"]),
2418 ("\u{A}\u{AC01}", &["\u{A}", "\u{AC01}"]),
2419 ("\u{A}\u{308}\u{AC01}", &["\u{A}", "\u{308}", "\u{AC01}"]),
2420 ("\u{A}\u{1F1E6}", &["\u{A}", "\u{1F1E6}"]),
2421 ("\u{A}\u{308}\u{1F1E6}", &["\u{A}", "\u{308}", "\u{1F1E6}"]),
2422 ("\u{A}\u{378}", &["\u{A}", "\u{378}"]),
2423 ("\u{A}\u{308}\u{378}", &["\u{A}", "\u{308}", "\u{378}"]),
2424 ("\u{1}\u{20}", &["\u{1}", "\u{20}"]),
2425 ("\u{1}\u{308}\u{20}", &["\u{1}", "\u{308}", "\u{20}"]),
2426 ("\u{1}\u{D}", &["\u{1}", "\u{D}"]),
2427 ("\u{1}\u{308}\u{D}", &["\u{1}", "\u{308}", "\u{D}"]),
2428 ("\u{1}\u{A}", &["\u{1}", "\u{A}"]),
2429 ("\u{1}\u{308}\u{A}", &["\u{1}", "\u{308}", "\u{A}"]),
2430 ("\u{1}\u{1}", &["\u{1}", "\u{1}"]),
2431 ("\u{1}\u{308}\u{1}", &["\u{1}", "\u{308}", "\u{1}"]),
2432 ("\u{1}\u{300}", &["\u{1}", "\u{300}"]),
2433 ("\u{1}\u{308}\u{300}", &["\u{1}", "\u{308}\u{300}"]),
2434 ("\u{1}\u{903}", &["\u{1}", "\u{903}"]),
2435 ("\u{1}\u{1100}", &["\u{1}", "\u{1100}"]),
2436 ("\u{1}\u{308}\u{1100}", &["\u{1}", "\u{308}", "\u{1100}"]),
2437 ("\u{1}\u{1160}", &["\u{1}", "\u{1160}"]),
2438 ("\u{1}\u{308}\u{1160}", &["\u{1}", "\u{308}", "\u{1160}"]),
2439 ("\u{1}\u{11A8}", &["\u{1}", "\u{11A8}"]),
2440 ("\u{1}\u{308}\u{11A8}", &["\u{1}", "\u{308}", "\u{11A8}"]),
2441 ("\u{1}\u{AC00}", &["\u{1}", "\u{AC00}"]),
2442 ("\u{1}\u{308}\u{AC00}", &["\u{1}", "\u{308}", "\u{AC00}"]),
2443 ("\u{1}\u{AC01}", &["\u{1}", "\u{AC01}"]),
2444 ("\u{1}\u{308}\u{AC01}", &["\u{1}", "\u{308}", "\u{AC01}"]),
2445 ("\u{1}\u{1F1E6}", &["\u{1}", "\u{1F1E6}"]),
2446 ("\u{1}\u{308}\u{1F1E6}", &["\u{1}", "\u{308}", "\u{1F1E6}"]),
2447 ("\u{1}\u{378}", &["\u{1}", "\u{378}"]),
2448 ("\u{1}\u{308}\u{378}", &["\u{1}", "\u{308}", "\u{378}"]),
2449 ("\u{300}\u{20}", &["\u{300}", "\u{20}"]),
2450 ("\u{300}\u{308}\u{20}", &["\u{300}\u{308}", "\u{20}"]),
2451 ("\u{300}\u{D}", &["\u{300}", "\u{D}"]),
2452 ("\u{300}\u{308}\u{D}", &["\u{300}\u{308}", "\u{D}"]),
2453 ("\u{300}\u{A}", &["\u{300}", "\u{A}"]),
2454 ("\u{300}\u{308}\u{A}", &["\u{300}\u{308}", "\u{A}"]),
2455 ("\u{300}\u{1}", &["\u{300}", "\u{1}"]),
2456 ("\u{300}\u{308}\u{1}", &["\u{300}\u{308}", "\u{1}"]),
2457 ("\u{300}\u{300}", &["\u{300}\u{300}"]),
2458 ("\u{300}\u{308}\u{300}", &["\u{300}\u{308}\u{300}"]),
2459 ("\u{300}\u{1100}", &["\u{300}", "\u{1100}"]),
2460 ("\u{300}\u{308}\u{1100}", &["\u{300}\u{308}", "\u{1100}"]),
2461 ("\u{300}\u{1160}", &["\u{300}", "\u{1160}"]),
2462 ("\u{300}\u{308}\u{1160}", &["\u{300}\u{308}", "\u{1160}"]),
2463 ("\u{300}\u{11A8}", &["\u{300}", "\u{11A8}"]),
2464 ("\u{300}\u{308}\u{11A8}", &["\u{300}\u{308}", "\u{11A8}"]),
2465 ("\u{300}\u{AC00}", &["\u{300}", "\u{AC00}"]),
2466 ("\u{300}\u{308}\u{AC00}", &["\u{300}\u{308}", "\u{AC00}"]),
2467 ("\u{300}\u{AC01}", &["\u{300}", "\u{AC01}"]),
2468 ("\u{300}\u{308}\u{AC01}", &["\u{300}\u{308}", "\u{AC01}"]),
2469 ("\u{300}\u{1F1E6}", &["\u{300}", "\u{1F1E6}"]),
2470 ("\u{300}\u{308}\u{1F1E6}", &["\u{300}\u{308}", "\u{1F1E6}"]),
2471 ("\u{300}\u{378}", &["\u{300}", "\u{378}"]),
2472 ("\u{300}\u{308}\u{378}", &["\u{300}\u{308}", "\u{378}"]),
2473 ("\u{903}\u{20}", &["\u{903}", "\u{20}"]),
2474 ("\u{903}\u{308}\u{20}", &["\u{903}\u{308}", "\u{20}"]),
2475 ("\u{903}\u{D}", &["\u{903}", "\u{D}"]),
2476 ("\u{903}\u{308}\u{D}", &["\u{903}\u{308}", "\u{D}"]),
2477 ("\u{903}\u{A}", &["\u{903}", "\u{A}"]),
2478 ("\u{903}\u{308}\u{A}", &["\u{903}\u{308}", "\u{A}"]),
2479 ("\u{903}\u{1}", &["\u{903}", "\u{1}"]),
2480 ("\u{903}\u{308}\u{1}", &["\u{903}\u{308}", "\u{1}"]),
2481 ("\u{903}\u{300}", &["\u{903}\u{300}"]),
2482 ("\u{903}\u{308}\u{300}", &["\u{903}\u{308}\u{300}"]),
2483 ("\u{903}\u{1100}", &["\u{903}", "\u{1100}"]),
2484 ("\u{903}\u{308}\u{1100}", &["\u{903}\u{308}", "\u{1100}"]),
2485 ("\u{903}\u{1160}", &["\u{903}", "\u{1160}"]),
2486 ("\u{903}\u{308}\u{1160}", &["\u{903}\u{308}", "\u{1160}"]),
2487 ("\u{903}\u{11A8}", &["\u{903}", "\u{11A8}"]),
2488 ("\u{903}\u{308}\u{11A8}", &["\u{903}\u{308}", "\u{11A8}"]),
2489 ("\u{903}\u{AC00}", &["\u{903}", "\u{AC00}"]),
2490 ("\u{903}\u{308}\u{AC00}", &["\u{903}\u{308}", "\u{AC00}"]),
2491 ("\u{903}\u{AC01}", &["\u{903}", "\u{AC01}"]),
2492 ("\u{903}\u{308}\u{AC01}", &["\u{903}\u{308}", "\u{AC01}"]),
2493 ("\u{903}\u{1F1E6}", &["\u{903}", "\u{1F1E6}"]),
2494 ("\u{903}\u{308}\u{1F1E6}", &["\u{903}\u{308}", "\u{1F1E6}"]),
2495 ("\u{903}\u{378}", &["\u{903}", "\u{378}"]),
2496 ("\u{903}\u{308}\u{378}", &["\u{903}\u{308}", "\u{378}"]),
2497 ("\u{1100}\u{20}", &["\u{1100}", "\u{20}"]),
2498 ("\u{1100}\u{308}\u{20}", &["\u{1100}\u{308}", "\u{20}"]),
2499 ("\u{1100}\u{D}", &["\u{1100}", "\u{D}"]),
2500 ("\u{1100}\u{308}\u{D}", &["\u{1100}\u{308}", "\u{D}"]),
2501 ("\u{1100}\u{A}", &["\u{1100}", "\u{A}"]),
2502 ("\u{1100}\u{308}\u{A}", &["\u{1100}\u{308}", "\u{A}"]),
2503 ("\u{1100}\u{1}", &["\u{1100}", "\u{1}"]),
2504 ("\u{1100}\u{308}\u{1}", &["\u{1100}\u{308}", "\u{1}"]),
2505 ("\u{1100}\u{300}", &["\u{1100}\u{300}"]),
2506 ("\u{1100}\u{308}\u{300}", &["\u{1100}\u{308}\u{300}"]),
2507 ("\u{1100}\u{1100}", &["\u{1100}\u{1100}"]),
2508 ("\u{1100}\u{308}\u{1100}", &["\u{1100}\u{308}", "\u{1100}"]),
2509 ("\u{1100}\u{1160}", &["\u{1100}\u{1160}"]),
2510 ("\u{1100}\u{308}\u{1160}", &["\u{1100}\u{308}", "\u{1160}"]),
2511 ("\u{1100}\u{11A8}", &["\u{1100}", "\u{11A8}"]),
2512 ("\u{1100}\u{308}\u{11A8}", &["\u{1100}\u{308}", "\u{11A8}"]),
2513 ("\u{1100}\u{AC00}", &["\u{1100}\u{AC00}"]),
2514 ("\u{1100}\u{308}\u{AC00}", &["\u{1100}\u{308}", "\u{AC00}"]),
2515 ("\u{1100}\u{AC01}", &["\u{1100}\u{AC01}"]),
2516 ("\u{1100}\u{308}\u{AC01}", &["\u{1100}\u{308}", "\u{AC01}"]),
2517 ("\u{1100}\u{1F1E6}", &["\u{1100}", "\u{1F1E6}"]),
2518 ("\u{1100}\u{308}\u{1F1E6}", &["\u{1100}\u{308}", "\u{1F1E6}"]),
2519 ("\u{1100}\u{378}", &["\u{1100}", "\u{378}"]),
2520 ("\u{1100}\u{308}\u{378}", &["\u{1100}\u{308}", "\u{378}"]),
2521 ("\u{1160}\u{20}", &["\u{1160}", "\u{20}"]),
2522 ("\u{1160}\u{308}\u{20}", &["\u{1160}\u{308}", "\u{20}"]),
2523 ("\u{1160}\u{D}", &["\u{1160}", "\u{D}"]),
2524 ("\u{1160}\u{308}\u{D}", &["\u{1160}\u{308}", "\u{D}"]),
2525 ("\u{1160}\u{A}", &["\u{1160}", "\u{A}"]),
2526 ("\u{1160}\u{308}\u{A}", &["\u{1160}\u{308}", "\u{A}"]),
2527 ("\u{1160}\u{1}", &["\u{1160}", "\u{1}"]),
2528 ("\u{1160}\u{308}\u{1}", &["\u{1160}\u{308}", "\u{1}"]),
2529 ("\u{1160}\u{300}", &["\u{1160}\u{300}"]),
2530 ("\u{1160}\u{308}\u{300}", &["\u{1160}\u{308}\u{300}"]),
2531 ("\u{1160}\u{1100}", &["\u{1160}", "\u{1100}"]),
2532 ("\u{1160}\u{308}\u{1100}", &["\u{1160}\u{308}", "\u{1100}"]),
2533 ("\u{1160}\u{1160}", &["\u{1160}\u{1160}"]),
2534 ("\u{1160}\u{308}\u{1160}", &["\u{1160}\u{308}", "\u{1160}"]),
2535 ("\u{1160}\u{11A8}", &["\u{1160}\u{11A8}"]),
2536 ("\u{1160}\u{308}\u{11A8}", &["\u{1160}\u{308}", "\u{11A8}"]),
2537 ("\u{1160}\u{AC00}", &["\u{1160}", "\u{AC00}"]),
2538 ("\u{1160}\u{308}\u{AC00}", &["\u{1160}\u{308}", "\u{AC00}"]),
2539 ("\u{1160}\u{AC01}", &["\u{1160}", "\u{AC01}"]),
2540 ("\u{1160}\u{308}\u{AC01}", &["\u{1160}\u{308}", "\u{AC01}"]),
2541 ("\u{1160}\u{1F1E6}", &["\u{1160}", "\u{1F1E6}"]),
2542 ("\u{1160}\u{308}\u{1F1E6}", &["\u{1160}\u{308}", "\u{1F1E6}"]),
2543 ("\u{1160}\u{378}", &["\u{1160}", "\u{378}"]),
2544 ("\u{1160}\u{308}\u{378}", &["\u{1160}\u{308}", "\u{378}"]),
2545 ("\u{11A8}\u{20}", &["\u{11A8}", "\u{20}"]),
2546 ("\u{11A8}\u{308}\u{20}", &["\u{11A8}\u{308}", "\u{20}"]),
2547 ("\u{11A8}\u{D}", &["\u{11A8}", "\u{D}"]),
2548 ("\u{11A8}\u{308}\u{D}", &["\u{11A8}\u{308}", "\u{D}"]),
2549 ("\u{11A8}\u{A}", &["\u{11A8}", "\u{A}"]),
2550 ("\u{11A8}\u{308}\u{A}", &["\u{11A8}\u{308}", "\u{A}"]),
2551 ("\u{11A8}\u{1}", &["\u{11A8}", "\u{1}"]),
2552 ("\u{11A8}\u{308}\u{1}", &["\u{11A8}\u{308}", "\u{1}"]),
2553 ("\u{11A8}\u{300}", &["\u{11A8}\u{300}"]),
2554 ("\u{11A8}\u{308}\u{300}", &["\u{11A8}\u{308}\u{300}"]),
2555 ("\u{11A8}\u{1100}", &["\u{11A8}", "\u{1100}"]),
2556 ("\u{11A8}\u{308}\u{1100}", &["\u{11A8}\u{308}", "\u{1100}"]),
2557 ("\u{11A8}\u{1160}", &["\u{11A8}", "\u{1160}"]),
2558 ("\u{11A8}\u{308}\u{1160}", &["\u{11A8}\u{308}", "\u{1160}"]),
2559 ("\u{11A8}\u{11A8}", &["\u{11A8}\u{11A8}"]),
2560 ("\u{11A8}\u{308}\u{11A8}", &["\u{11A8}\u{308}", "\u{11A8}"]),
2561 ("\u{11A8}\u{AC00}", &["\u{11A8}", "\u{AC00}"]),
2562 ("\u{11A8}\u{308}\u{AC00}", &["\u{11A8}\u{308}", "\u{AC00}"]),
2563 ("\u{11A8}\u{AC01}", &["\u{11A8}", "\u{AC01}"]),
2564 ("\u{11A8}\u{308}\u{AC01}", &["\u{11A8}\u{308}", "\u{AC01}"]),
2565 ("\u{11A8}\u{1F1E6}", &["\u{11A8}", "\u{1F1E6}"]),
2566 ("\u{11A8}\u{308}\u{1F1E6}", &["\u{11A8}\u{308}", "\u{1F1E6}"]),
2567 ("\u{11A8}\u{378}", &["\u{11A8}", "\u{378}"]),
2568 ("\u{11A8}\u{308}\u{378}", &["\u{11A8}\u{308}", "\u{378}"]),
2569 ("\u{AC00}\u{20}", &["\u{AC00}", "\u{20}"]),
2570 ("\u{AC00}\u{308}\u{20}", &["\u{AC00}\u{308}", "\u{20}"]),
2571 ("\u{AC00}\u{D}", &["\u{AC00}", "\u{D}"]),
2572 ("\u{AC00}\u{308}\u{D}", &["\u{AC00}\u{308}", "\u{D}"]),
2573 ("\u{AC00}\u{A}", &["\u{AC00}", "\u{A}"]),
2574 ("\u{AC00}\u{308}\u{A}", &["\u{AC00}\u{308}", "\u{A}"]),
2575 ("\u{AC00}\u{1}", &["\u{AC00}", "\u{1}"]),
2576 ("\u{AC00}\u{308}\u{1}", &["\u{AC00}\u{308}", "\u{1}"]),
2577 ("\u{AC00}\u{300}", &["\u{AC00}\u{300}"]),
2578 ("\u{AC00}\u{308}\u{300}", &["\u{AC00}\u{308}\u{300}"]),
2579 ("\u{AC00}\u{1100}", &["\u{AC00}", "\u{1100}"]),
2580 ("\u{AC00}\u{308}\u{1100}", &["\u{AC00}\u{308}", "\u{1100}"]),
2581 ("\u{AC00}\u{1160}", &["\u{AC00}\u{1160}"]),
2582 ("\u{AC00}\u{308}\u{1160}", &["\u{AC00}\u{308}", "\u{1160}"]),
2583 ("\u{AC00}\u{11A8}", &["\u{AC00}\u{11A8}"]),
2584 ("\u{AC00}\u{308}\u{11A8}", &["\u{AC00}\u{308}", "\u{11A8}"]),
2585 ("\u{AC00}\u{AC00}", &["\u{AC00}", "\u{AC00}"]),
2586 ("\u{AC00}\u{308}\u{AC00}", &["\u{AC00}\u{308}", "\u{AC00}"]),
2587 ("\u{AC00}\u{AC01}", &["\u{AC00}", "\u{AC01}"]),
2588 ("\u{AC00}\u{308}\u{AC01}", &["\u{AC00}\u{308}", "\u{AC01}"]),
2589 ("\u{AC00}\u{1F1E6}", &["\u{AC00}", "\u{1F1E6}"]),
2590 ("\u{AC00}\u{308}\u{1F1E6}", &["\u{AC00}\u{308}", "\u{1F1E6}"]),
2591 ("\u{AC00}\u{378}", &["\u{AC00}", "\u{378}"]),
2592 ("\u{AC00}\u{308}\u{378}", &["\u{AC00}\u{308}", "\u{378}"]),
2593 ("\u{AC01}\u{20}", &["\u{AC01}", "\u{20}"]),
2594 ("\u{AC01}\u{308}\u{20}", &["\u{AC01}\u{308}", "\u{20}"]),
2595 ("\u{AC01}\u{D}", &["\u{AC01}", "\u{D}"]),
2596 ("\u{AC01}\u{308}\u{D}", &["\u{AC01}\u{308}", "\u{D}"]),
2597 ("\u{AC01}\u{A}", &["\u{AC01}", "\u{A}"]),
2598 ("\u{AC01}\u{308}\u{A}", &["\u{AC01}\u{308}", "\u{A}"]),
2599 ("\u{AC01}\u{1}", &["\u{AC01}", "\u{1}"]),
2600 ("\u{AC01}\u{308}\u{1}", &["\u{AC01}\u{308}", "\u{1}"]),
2601 ("\u{AC01}\u{300}", &["\u{AC01}\u{300}"]),
2602 ("\u{AC01}\u{308}\u{300}", &["\u{AC01}\u{308}\u{300}"]),
2603 ("\u{AC01}\u{1100}", &["\u{AC01}", "\u{1100}"]),
2604 ("\u{AC01}\u{308}\u{1100}", &["\u{AC01}\u{308}", "\u{1100}"]),
2605 ("\u{AC01}\u{1160}", &["\u{AC01}", "\u{1160}"]),
2606 ("\u{AC01}\u{308}\u{1160}", &["\u{AC01}\u{308}", "\u{1160}"]),
2607 ("\u{AC01}\u{11A8}", &["\u{AC01}\u{11A8}"]),
2608 ("\u{AC01}\u{308}\u{11A8}", &["\u{AC01}\u{308}", "\u{11A8}"]),
2609 ("\u{AC01}\u{AC00}", &["\u{AC01}", "\u{AC00}"]),
2610 ("\u{AC01}\u{308}\u{AC00}", &["\u{AC01}\u{308}", "\u{AC00}"]),
2611 ("\u{AC01}\u{AC01}", &["\u{AC01}", "\u{AC01}"]),
2612 ("\u{AC01}\u{308}\u{AC01}", &["\u{AC01}\u{308}", "\u{AC01}"]),
2613 ("\u{AC01}\u{1F1E6}", &["\u{AC01}", "\u{1F1E6}"]),
2614 ("\u{AC01}\u{308}\u{1F1E6}", &["\u{AC01}\u{308}", "\u{1F1E6}"]),
2615 ("\u{AC01}\u{378}", &["\u{AC01}", "\u{378}"]),
2616 ("\u{AC01}\u{308}\u{378}", &["\u{AC01}\u{308}", "\u{378}"]),
2617 ("\u{1F1E6}\u{20}", &["\u{1F1E6}", "\u{20}"]),
2618 ("\u{1F1E6}\u{308}\u{20}", &["\u{1F1E6}\u{308}", "\u{20}"]),
2619 ("\u{1F1E6}\u{D}", &["\u{1F1E6}", "\u{D}"]),
2620 ("\u{1F1E6}\u{308}\u{D}", &["\u{1F1E6}\u{308}", "\u{D}"]),
2621 ("\u{1F1E6}\u{A}", &["\u{1F1E6}", "\u{A}"]),
2622 ("\u{1F1E6}\u{308}\u{A}", &["\u{1F1E6}\u{308}", "\u{A}"]),
2623 ("\u{1F1E6}\u{1}", &["\u{1F1E6}", "\u{1}"]),
2624 ("\u{1F1E6}\u{308}\u{1}", &["\u{1F1E6}\u{308}", "\u{1}"]),
2625 ("\u{1F1E6}\u{300}", &["\u{1F1E6}\u{300}"]),
2626 ("\u{1F1E6}\u{308}\u{300}", &["\u{1F1E6}\u{308}\u{300}"]),
2627 ("\u{1F1E6}\u{1100}", &["\u{1F1E6}", "\u{1100}"]),
2628 ("\u{1F1E6}\u{308}\u{1100}", &["\u{1F1E6}\u{308}", "\u{1100}"]),
2629 ("\u{1F1E6}\u{1160}", &["\u{1F1E6}", "\u{1160}"]),
2630 ("\u{1F1E6}\u{308}\u{1160}", &["\u{1F1E6}\u{308}", "\u{1160}"]),
2631 ("\u{1F1E6}\u{11A8}", &["\u{1F1E6}", "\u{11A8}"]),
2632 ("\u{1F1E6}\u{308}\u{11A8}", &["\u{1F1E6}\u{308}", "\u{11A8}"]),
2633 ("\u{1F1E6}\u{AC00}", &["\u{1F1E6}", "\u{AC00}"]),
2634 ("\u{1F1E6}\u{308}\u{AC00}", &["\u{1F1E6}\u{308}", "\u{AC00}"]),
2635 ("\u{1F1E6}\u{AC01}", &["\u{1F1E6}", "\u{AC01}"]),
2636 ("\u{1F1E6}\u{308}\u{AC01}", &["\u{1F1E6}\u{308}", "\u{AC01}"]),
2637 ("\u{1F1E6}\u{1F1E6}", &["\u{1F1E6}\u{1F1E6}"]),
2638 ("\u{1F1E6}\u{308}\u{1F1E6}", &["\u{1F1E6}\u{308}", "\u{1F1E6}"]),
2639 ("\u{1F1E6}\u{378}", &["\u{1F1E6}", "\u{378}"]),
2640 ("\u{1F1E6}\u{308}\u{378}", &["\u{1F1E6}\u{308}", "\u{378}"]),
2641 ("\u{378}\u{20}", &["\u{378}", "\u{20}"]),
2642 ("\u{378}\u{308}\u{20}", &["\u{378}\u{308}", "\u{20}"]),
2643 ("\u{378}\u{D}", &["\u{378}", "\u{D}"]),
2644 ("\u{378}\u{308}\u{D}", &["\u{378}\u{308}", "\u{D}"]),
2645 ("\u{378}\u{A}", &["\u{378}", "\u{A}"]),
2646 ("\u{378}\u{308}\u{A}", &["\u{378}\u{308}", "\u{A}"]),
2647 ("\u{378}\u{1}", &["\u{378}", "\u{1}"]),
2648 ("\u{378}\u{308}\u{1}", &["\u{378}\u{308}", "\u{1}"]),
2649 ("\u{378}\u{300}", &["\u{378}\u{300}"]),
2650 ("\u{378}\u{308}\u{300}", &["\u{378}\u{308}\u{300}"]),
2651 ("\u{378}\u{1100}", &["\u{378}", "\u{1100}"]),
2652 ("\u{378}\u{308}\u{1100}", &["\u{378}\u{308}", "\u{1100}"]),
2653 ("\u{378}\u{1160}", &["\u{378}", "\u{1160}"]),
2654 ("\u{378}\u{308}\u{1160}", &["\u{378}\u{308}", "\u{1160}"]),
2655 ("\u{378}\u{11A8}", &["\u{378}", "\u{11A8}"]),
2656 ("\u{378}\u{308}\u{11A8}", &["\u{378}\u{308}", "\u{11A8}"]),
2657 ("\u{378}\u{AC00}", &["\u{378}", "\u{AC00}"]),
2658 ("\u{378}\u{308}\u{AC00}", &["\u{378}\u{308}", "\u{AC00}"]),
2659 ("\u{378}\u{AC01}", &["\u{378}", "\u{AC01}"]),
2660 ("\u{378}\u{308}\u{AC01}", &["\u{378}\u{308}", "\u{AC01}"]),
2661 ("\u{378}\u{1F1E6}", &["\u{378}", "\u{1F1E6}"]),
2662 ("\u{378}\u{308}\u{1F1E6}", &["\u{378}\u{308}", "\u{1F1E6}"]),
2663 ("\u{378}\u{378}", &["\u{378}", "\u{378}"]),
2664 ("\u{378}\u{308}\u{378}", &["\u{378}\u{308}", "\u{378}"]),
2665 ("\u{61}\u{1F1E6}\u{62}", &["\u{61}", "\u{1F1E6}", "\u{62}"]),
2666 ("\u{1F1F7}\u{1F1FA}", &["\u{1F1F7}\u{1F1FA}"]),
2667 ("\u{1F1F7}\u{1F1FA}\u{1F1F8}", &["\u{1F1F7}\u{1F1FA}\u{1F1F8}"]),
2668 ("\u{1F1F7}\u{1F1FA}\u{1F1F8}\u{1F1EA}",
2669 &["\u{1F1F7}\u{1F1FA}\u{1F1F8}\u{1F1EA}"]),
2670 ("\u{1F1F7}\u{1F1FA}\u{200B}\u{1F1F8}\u{1F1EA}",
2671 &["\u{1F1F7}\u{1F1FA}", "\u{200B}", "\u{1F1F8}\u{1F1EA}"]),
2672 ("\u{1F1E6}\u{1F1E7}\u{1F1E8}", &["\u{1F1E6}\u{1F1E7}\u{1F1E8}"]),
2673 ("\u{1F1E6}\u{200D}\u{1F1E7}\u{1F1E8}", &["\u{1F1E6}\u{200D}",
2674 "\u{1F1E7}\u{1F1E8}"]),
2675 ("\u{1F1E6}\u{1F1E7}\u{200D}\u{1F1E8}",
2676 &["\u{1F1E6}\u{1F1E7}\u{200D}", "\u{1F1E8}"]),
2677 ("\u{20}\u{200D}\u{646}", &["\u{20}\u{200D}", "\u{646}"]),
2678 ("\u{646}\u{200D}\u{20}", &["\u{646}\u{200D}", "\u{20}"]),
2681 let test_diff: [(_, &[_], &[_]); 23] = [
2682 ("\u{20}\u{903}", &["\u{20}\u{903}"], &["\u{20}", "\u{903}"]), ("\u{20}\u{308}\u{903}",
2683 &["\u{20}\u{308}\u{903}"], &["\u{20}\u{308}", "\u{903}"]), ("\u{D}\u{308}\u{903}",
2684 &["\u{D}", "\u{308}\u{903}"], &["\u{D}", "\u{308}", "\u{903}"]), ("\u{A}\u{308}\u{903}",
2685 &["\u{A}", "\u{308}\u{903}"], &["\u{A}", "\u{308}", "\u{903}"]), ("\u{1}\u{308}\u{903}",
2686 &["\u{1}", "\u{308}\u{903}"], &["\u{1}", "\u{308}", "\u{903}"]), ("\u{300}\u{903}",
2687 &["\u{300}\u{903}"], &["\u{300}", "\u{903}"]), ("\u{300}\u{308}\u{903}",
2688 &["\u{300}\u{308}\u{903}"], &["\u{300}\u{308}", "\u{903}"]), ("\u{903}\u{903}",
2689 &["\u{903}\u{903}"], &["\u{903}", "\u{903}"]), ("\u{903}\u{308}\u{903}",
2690 &["\u{903}\u{308}\u{903}"], &["\u{903}\u{308}", "\u{903}"]), ("\u{1100}\u{903}",
2691 &["\u{1100}\u{903}"], &["\u{1100}", "\u{903}"]), ("\u{1100}\u{308}\u{903}",
2692 &["\u{1100}\u{308}\u{903}"], &["\u{1100}\u{308}", "\u{903}"]), ("\u{1160}\u{903}",
2693 &["\u{1160}\u{903}"], &["\u{1160}", "\u{903}"]), ("\u{1160}\u{308}\u{903}",
2694 &["\u{1160}\u{308}\u{903}"], &["\u{1160}\u{308}", "\u{903}"]), ("\u{11A8}\u{903}",
2695 &["\u{11A8}\u{903}"], &["\u{11A8}", "\u{903}"]), ("\u{11A8}\u{308}\u{903}",
2696 &["\u{11A8}\u{308}\u{903}"], &["\u{11A8}\u{308}", "\u{903}"]), ("\u{AC00}\u{903}",
2697 &["\u{AC00}\u{903}"], &["\u{AC00}", "\u{903}"]), ("\u{AC00}\u{308}\u{903}",
2698 &["\u{AC00}\u{308}\u{903}"], &["\u{AC00}\u{308}", "\u{903}"]), ("\u{AC01}\u{903}",
2699 &["\u{AC01}\u{903}"], &["\u{AC01}", "\u{903}"]), ("\u{AC01}\u{308}\u{903}",
2700 &["\u{AC01}\u{308}\u{903}"], &["\u{AC01}\u{308}", "\u{903}"]), ("\u{1F1E6}\u{903}",
2701 &["\u{1F1E6}\u{903}"], &["\u{1F1E6}", "\u{903}"]), ("\u{1F1E6}\u{308}\u{903}",
2702 &["\u{1F1E6}\u{308}\u{903}"], &["\u{1F1E6}\u{308}", "\u{903}"]), ("\u{378}\u{903}",
2703 &["\u{378}\u{903}"], &["\u{378}", "\u{903}"]), ("\u{378}\u{308}\u{903}",
2704 &["\u{378}\u{308}\u{903}"], &["\u{378}\u{308}", "\u{903}"]),
2707 for &(s, g) in &test_same[] {
2708 // test forward iterator
2709 assert!(order::equals(s.graphemes(true), g.iter().cloned()));
2710 assert!(order::equals(s.graphemes(false), g.iter().cloned()));
2712 // test reverse iterator
2713 assert!(order::equals(s.graphemes(true).rev(), g.iter().rev().cloned()));
2714 assert!(order::equals(s.graphemes(false).rev(), g.iter().rev().cloned()));
2717 for &(s, gt, gf) in &test_diff {
2718 // test forward iterator
2719 assert!(order::equals(s.graphemes(true), gt.iter().cloned()));
2720 assert!(order::equals(s.graphemes(false), gf.iter().cloned()));
2722 // test reverse iterator
2723 assert!(order::equals(s.graphemes(true).rev(), gt.iter().rev().cloned()));
2724 assert!(order::equals(s.graphemes(false).rev(), gf.iter().rev().cloned()));
2727 // test the indices iterators
2728 let s = "a̐éö̲\r\n";
2729 let gr_inds = s.grapheme_indices(true).collect::<Vec<(usize, &str)>>();
2730 let b: &[_] = &[(0, "a̐"), (3, "é"), (6, "ö̲"), (11, "\r\n")];
2731 assert_eq!(gr_inds, b);
2732 let gr_inds = s.grapheme_indices(true).rev().collect::<Vec<(usize, &str)>>();
2733 let b: &[_] = &[(11, "\r\n"), (6, "ö̲"), (3, "é"), (0, "a̐")];
2734 assert_eq!(gr_inds, b);
2735 let mut gr_inds_iter = s.grapheme_indices(true);
2737 let gr_inds = gr_inds_iter.by_ref();
2738 let e1 = gr_inds.size_hint();
2739 assert_eq!(e1, (1, Some(13)));
2740 let c = gr_inds.count();
2743 let e2 = gr_inds_iter.size_hint();
2744 assert_eq!(e2, (0, Some(0)));
2746 // make sure the reverse iterator does the right thing with "\n" at beginning of string
2748 let gr = s.graphemes(true).rev().collect::<Vec<&str>>();
2749 let b: &[_] = &["\r", "\r\n", "\n"];
// Splitting on a multi-character substring separator via `split_str`.
// Covers: separator not present, interior separators, leading/trailing
// separators (which yield empty leading/trailing fields), non-ASCII text,
// and non-overlapping left-to-right matching ("zzz" on "zz" -> ["", "z"]).
2754 fn test_split_strator() {
// Helper: split `s` on `sep` and compare the collected pieces against `u`.
// NOTE(review): the assertion line of `t` is elided from this chunk of the
// file; presumably `assert_eq!(v, u);` — confirm against the full file.
2755 fn t(s: &str, sep: &str, u: &[&str]) {
2756 let v: Vec<&str> = s.split_str(sep).collect();
2759 t("--1233345--", "12345", &["--1233345--"]);
2760 t("abc::hello::there", "::", &["abc", "hello", "there"]);
2761 t("::hello::there", "::", &["", "hello", "there"]);
2762 t("hello::there::", "::", &["hello", "there", ""]);
2763 t("::hello::there::", "::", &["", "hello", "there", ""]);
2764 t("ประเทศไทย中华Việt Nam", "中华", &["ประเทศไทย", "Việt Nam"]);
2765 t("zzXXXzzYYYzz", "zz", &["", "XXX", "YYY", ""]);
2766 t("zzXXXzYYYz", "XXX", &["zz", "zYYYz"]);
2767 t(".XXX.YYY.", ".", &["", "XXX", "YYY", ""]);
// A separator equal to the whole string yields two empty fields.
2769 t("zz", "zz", &["",""]);
2770 t("ok", "z", &["ok"]);
2771 t("zzz", "zz", &["","z"]);
2772 t("zzzzz", "zz", &["","","z"]);
// `Default::default()` for any string type `S: Default + Str` must produce
// the empty string (checked through `as_slice()`).
2776 fn test_str_default() {
2777 use core::default::Default;
// Generic helper; the concrete instantiations (e.g. for `&str`) are elided
// from this chunk of the file — confirm against the full file.
2778 fn t<S: Default + Str>() {
2779 let s: S = Default::default();
2780 assert_eq!(s.as_slice(), "");
// Summing byte lengths over `&[&str]`: mixed literal pieces (including the
// empty string) and `&String` derefs must both total the length of "01234".
2788 fn test_str_container() {
// Total byte length of every slice in `v`.
2789 fn sum_len(v: &[&str]) -> usize {
2790 v.iter().map(|x| x.len()).sum()
2793 let s = String::from_str("01234");
2794 assert_eq!(5, sum_len(&["012", "", "34"]));
// `&String` coerces to `&str` here, exercising deref coercion in the slice.
2795 assert_eq!(5, sum_len(&[&String::from_str("01"),
2796 &String::from_str("2"),
2797 &String::from_str("34"),
2798 &String::from_str("")]));
2799 assert_eq!(5, sum_len(&[&s]));
// `from_utf8` on raw byte slices: valid ASCII and valid multi-byte UTF-8
// decode to `Ok`; a trailing 0xFF byte is rejected.
2803 fn test_str_from_utf8() {
// NOTE(review): the binding for the first `xs` (presumably b"hello") is
// elided from this chunk — confirm against the full file.
2805 assert_eq!(from_utf8(xs), Ok("hello"));
2807 let xs = "ศไทย中华Việt Nam".as_bytes();
2808 assert_eq!(from_utf8(xs), Ok("ศไทย中华Việt Nam"));
2810 let xs = b"hello\xFF";
// The test pins the decoder's then-current error variant (`TooShort`) for a
// stray 0xFF at end-of-input; the exact variant is a property of this
// library's `Utf8Error` enum, not of UTF-8 itself.
2811 assert_eq!(from_utf8(xs), Err(Utf8Error::TooShort));
2818 use prelude::{SliceExt, IteratorExt, SliceConcatExt};
2820 use test::black_box;
// Bench: count the chars of a mixed Thai/Chinese/Vietnamese + ASCII string
// by exhausting the forward `chars()` iterator on each timed iteration.
2823 fn char_iterator(b: &mut Bencher) {
2824 let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
2826 b.iter(|| s.chars().count());
// Bench: same input as `char_iterator`, but driven by a `for` loop;
// `black_box` keeps the per-char work from being optimized away.
2830 fn char_iterator_for(b: &mut Bencher) {
2831 let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
// NOTE(review): the enclosing `b.iter(|| { ... })` wrapper lines are elided
// from this chunk; only the loop body is visible.
2834 for ch in s.chars() { black_box(ch); }
// Bench: `chars().count()` over pure-ASCII text (single-byte sequences),
// for comparison with the multi-byte benchmarks above.
2839 fn char_iterator_ascii(b: &mut Bencher) {
2840 let s = "Mary had a little lamb, Little lamb
2841 Mary had a little lamb, Little lamb
2842 Mary had a little lamb, Little lamb
2843 Mary had a little lamb, Little lamb
2844 Mary had a little lamb, Little lamb
2845 Mary had a little lamb, Little lamb";
2847 b.iter(|| s.chars().count());
// Bench: counting chars through the reverse iterator (`chars().rev()`),
// which decodes UTF-8 sequences back-to-front.
2851 fn char_iterator_rev(b: &mut Bencher) {
2852 let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
2854 b.iter(|| s.chars().rev().count());
// Bench: reverse char iteration via a `for` loop with `black_box` to keep
// each decoded char observable to the optimizer.
2858 fn char_iterator_rev_for(b: &mut Bencher) {
2859 let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
// NOTE(review): the enclosing `b.iter(...)` wrapper lines are elided from
// this chunk.
2862 for ch in s.chars().rev() { black_box(ch); }
// Bench: forward `char_indices()` traversal. The expected char count is
// computed once outside the timed closure and asserted inside it, so the
// iterator cannot be optimized away.
2867 fn char_indicesator(b: &mut Bencher) {
2868 let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
2869 let len = s.chars().count();
2871 b.iter(|| assert_eq!(s.char_indices().count(), len));
// Bench: `char_indices().rev()` — same shape as `char_indicesator` but
// exercising the reverse traversal path.
2875 fn char_indicesator_rev(b: &mut Bencher) {
2876 let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
2877 let len = s.chars().count();
2879 b.iter(|| assert_eq!(s.char_indices().rev().count(), len));
// Bench: split a non-ASCII haystack on the ASCII char 'V' — two
// occurrences (one per "Việt") give three pieces.
2883 fn split_unicode_ascii(b: &mut Bencher) {
2884 let s = "ประเทศไทย中华Việt Namประเทศไทย中华Việt Nam";
2886 b.iter(|| assert_eq!(s.split('V').count(), 3));
// Bench: the same split as `split_unicode_ascii`, but through a custom
// `CharEq` whose `only_ascii()` returns false — this forces the general
// search path instead of any ASCII byte-scan fast path.
2890 fn split_unicode_not_ascii(b: &mut Bencher) {
// Newtype matcher around the target char, declared non-ASCII on purpose.
2891 struct NotAscii(char);
2892 impl CharEq for NotAscii {
2893 fn matches(&mut self, c: char) -> bool {
2894 let NotAscii(cc) = *self;
// NOTE(review): the comparison line of `matches` (presumably `cc == c`) is
// elided from this chunk — confirm against the full file.
2897 fn only_ascii(&self) -> bool { false }
2899 let s = "ประเทศไทย中华Việt Namประเทศไทย中华Việt Nam";
2901 b.iter(|| assert_eq!(s.split(NotAscii('V')).count(), 3));
// Bench: split an ASCII sentence on ' '. The expected piece count is
// computed once up front and re-asserted inside the timed closure.
2906 fn split_ascii(b: &mut Bencher) {
2907 let s = "Mary had a little lamb, Little lamb, little-lamb.";
2908 let len = s.split(' ').count();
2910 b.iter(|| assert_eq!(s.split(' ').count(), len));
// Bench: ASCII haystack split through the forced non-ASCII `CharEq` path
// (`only_ascii() == false`), for direct comparison with `split_ascii`.
2914 fn split_not_ascii(b: &mut Bencher) {
2915 struct NotAscii(char);
2916 impl CharEq for NotAscii {
2918 fn matches(&mut self, c: char) -> bool {
2919 let NotAscii(cc) = *self;
// NOTE(review): the comparison line of `matches` (presumably `cc == c`) is
// elided from this chunk — confirm against the full file.
2922 fn only_ascii(&self) -> bool { false }
2924 let s = "Mary had a little lamb, Little lamb, little-lamb.";
2925 let len = s.split(' ').count();
2927 b.iter(|| assert_eq!(s.split(NotAscii(' ')).count(), len));
// Bench: split with a named fn-pointer predicate instead of a char value,
// measuring the function-pointer dispatch variant of the same split.
2931 fn split_extern_fn(b: &mut Bencher) {
2932 let s = "Mary had a little lamb, Little lamb, little-lamb.";
2933 let len = s.split(' ').count();
2934 fn pred(c: char) -> bool { c == ' ' }
2936 b.iter(|| assert_eq!(s.split(pred).count(), len));
// Bench: split with an inline closure predicate (vs. char / fn pointer /
// slice in the sibling benchmarks).
2940 fn split_closure(b: &mut Bencher) {
2941 let s = "Mary had a little lamb, Little lamb, little-lamb.";
2942 let len = s.split(' ').count();
2944 b.iter(|| assert_eq!(s.split(|c: char| c == ' ').count(), len));
// Bench: split with a `&[char]` pattern (matches any char contained in the
// slice; here a single-element slice holding ' ').
2948 fn split_slice(b: &mut Bencher) {
2949 let s = "Mary had a little lamb, Little lamb, little-lamb.";
2950 let len = s.split(' ').count();
2952 let c: &[char] = &[' '];
2953 b.iter(|| assert_eq!(s.split(c).count(), len));
// Bench: `connect` (join) of ten copies of `s`; the asserted length checks
// exactly ten copies plus nine separators were produced.
2957 fn bench_connect(b: &mut Bencher) {
2958 let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
// NOTE(review): the `sep` binding and the enclosing `b.iter(...)` wrapper
// are elided from this chunk — confirm against the full file.
2960 let v = vec![s, s, s, s, s, s, s, s, s, s];
2962 assert_eq!(v.connect(sep).len(), s.len() * 10 + sep.len() * 9);
// Bench: `contains` with a short haystack and a short needle that is
// present, so the search succeeds.
2967 fn bench_contains_short_short(b: &mut Bencher) {
2968 let haystack = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.";
// NOTE(review): the `needle` binding and the enclosing `b.iter(...)`
// wrapper are elided from this chunk — confirm against the full file.
2972 assert!(haystack.contains(needle));
// Bench: `contains` with a short needle ("english") that is absent from a
// long multi-paragraph lorem-ipsum haystack — the unsuccessful-search case
// over a large input.
2977 fn bench_contains_short_long(b: &mut Bencher) {
2979 Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse quis lorem sit amet dolor \
2980 ultricies condimentum. Praesent iaculis purus elit, ac malesuada quam malesuada in. Duis sed orci \
2981 eros. Suspendisse sit amet magna mollis, mollis nunc luctus, imperdiet mi. Integer fringilla non \
2982 sem ut lacinia. Fusce varius tortor a risus porttitor hendrerit. Morbi mauris dui, ultricies nec \
2983 tempus vel, gravida nec quam.
2985 In est dui, tincidunt sed tempus interdum, adipiscing laoreet ante. Etiam tempor, tellus quis \
2986 sagittis interdum, nulla purus mattis sem, quis auctor erat odio ac tellus. In nec nunc sit amet \
2987 diam volutpat molestie at sed ipsum. Vestibulum laoreet consequat vulputate. Integer accumsan \
2988 lorem ac dignissim placerat. Suspendisse convallis faucibus lorem. Aliquam erat volutpat. In vel \
2989 eleifend felis. Sed suscipit nulla lorem, sed mollis est sollicitudin et. Nam fermentum egestas \
2990 interdum. Curabitur ut nisi justo.
2992 Sed sollicitudin ipsum tellus, ut condimentum leo eleifend nec. Cras ut velit ante. Phasellus nec \
2993 mollis odio. Mauris molestie erat in arcu mattis, at aliquet dolor vehicula. Quisque malesuada \
2994 lectus sit amet nisi pretium, a condimentum ipsum porta. Morbi at dapibus diam. Praesent egestas \
2995 est sed risus elementum, eu rutrum metus ultrices. Etiam fermentum consectetur magna, id rutrum \
2996 felis accumsan a. Aliquam ut pellentesque libero. Sed mi nulla, lobortis eu tortor id, suscipit \
2997 ultricies neque. Morbi iaculis sit amet risus at iaculis. Praesent eget ligula quis turpis \
2998 feugiat suscipit vel non arcu. Interdum et malesuada fames ac ante ipsum primis in faucibus. \
2999 Aliquam sit amet placerat lorem.
3001 Cras a lacus vel ante posuere elementum. Nunc est leo, bibendum ut facilisis vel, bibendum at \
3002 mauris. Nullam adipiscing diam vel odio ornare, luctus adipiscing mi luctus. Nulla facilisi. \
3003 Mauris adipiscing bibendum neque, quis adipiscing lectus tempus et. Sed feugiat erat et nisl \
3004 lobortis pharetra. Donec vitae erat enim. Nullam sit amet felis et quam lacinia tincidunt. Aliquam \
3005 suscipit dapibus urna. Sed volutpat urna in magna pulvinar volutpat. Phasellus nec tellus ac diam \
3008 Nam lectus enim, dapibus non nisi tempor, consectetur convallis massa. Maecenas eleifend dictum \
3009 feugiat. Etiam quis mauris vel risus luctus mattis a a nunc. Nullam orci quam, imperdiet id \
3010 vehicula in, porttitor ut nibh. Duis sagittis adipiscing nisl vitae congue. Donec mollis risus eu \
3011 leo suscipit, varius porttitor nulla porta. Pellentesque ut sem nec nisi euismod vehicula. Nulla \
3012 malesuada sollicitudin quam eu fermentum.";
// NOTE(review): the `let haystack = "\` opening line of the literal above
// and the enclosing `b.iter(...)` wrapper are elided from this chunk —
// confirm against the full file.
3013 let needle = "english";
3016 assert!(!haystack.contains(needle));
// Bench: worst case for a naive substring search — a long run of 'a's with
// a needle that mismatches only on its final byte, forcing a naive
// algorithm to nearly complete a comparison at every starting position.
3021 fn bench_contains_bad_naive(b: &mut Bencher) {
3022 let haystack = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
3023 let needle = "aaaaaaaab";
// NOTE(review): the enclosing `b.iter(...)` wrapper is elided from this
// chunk — confirm against the full file.
3026 assert!(!haystack.contains(needle));
3031 fn bench_contains_equal(b: &mut Bencher) {
3032 let haystack = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.";
3033 let needle = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.";
3036 assert!(haystack.contains(needle));