1 //! Buffering wrappers for I/O traits
3 use crate::io::prelude::*;
9 self, Error, ErrorKind, Initializer, IoSlice, IoSliceMut, SeekFrom, DEFAULT_BUF_SIZE,
13 /// The `BufReader<R>` struct adds buffering to any reader.
15 /// It can be excessively inefficient to work directly with a [`Read`] instance.
16 /// For example, every call to [`read`][`TcpStream::read`] on [`TcpStream`]
17 /// results in a system call. A `BufReader<R>` performs large, infrequent reads on
18 /// the underlying [`Read`] and maintains an in-memory buffer of the results.
20 /// `BufReader<R>` can improve the speed of programs that make *small* and
21 /// *repeated* read calls to the same file or network socket. It does not
22 /// help when reading very large amounts at once, or reading just one or a few
23 /// times. It also provides no advantage when reading from a source that is
24 /// already in memory, like a `Vec<u8>`.
26 /// When the `BufReader<R>` is dropped, the contents of its buffer will be
27 /// discarded. Creating multiple instances of a `BufReader<R>` on the same
28 /// stream can cause data loss. Reading from the underlying reader after
29 /// unwrapping the `BufReader<R>` with `BufReader::into_inner` can also cause
32 /// [`Read`]: ../../std/io/trait.Read.html
33 /// [`TcpStream::read`]: ../../std/net/struct.TcpStream.html#method.read
34 /// [`TcpStream`]: ../../std/net/struct.TcpStream.html
39 /// use std::io::prelude::*;
40 /// use std::io::BufReader;
41 /// use std::fs::File;
43 /// fn main() -> std::io::Result<()> {
44 /// let f = File::open("log.txt")?;
45 /// let mut reader = BufReader::new(f);
47 /// let mut line = String::new();
48 /// let len = reader.read_line(&mut line)?;
49 /// println!("First line is {} bytes long", len);
53 #[stable(feature = "rust1", since = "1.0.0")]
54 pub struct BufReader<R> {
// NOTE(review): field declarations are on lines elided from this view.
// Usage elsewhere in this file shows: `inner: R` (the wrapped reader), a
// boxed byte buffer `buf`, and the cursors `pos`/`cap` delimiting the
// valid, not-yet-consumed region `buf[pos..cap]`.
61 impl<R: Read> BufReader<R> {
62 /// Creates a new `BufReader<R>` with a default buffer capacity. The default is currently 8 KB,
63 /// but may change in the future.
68 /// use std::io::BufReader;
69 /// use std::fs::File;
71 /// fn main() -> std::io::Result<()> {
72 /// let f = File::open("log.txt")?;
73 /// let reader = BufReader::new(f);
77 #[stable(feature = "rust1", since = "1.0.0")]
78 pub fn new(inner: R) -> BufReader<R> {
// Delegates to `with_capacity` with the crate-wide default buffer size.
79 BufReader::with_capacity(DEFAULT_BUF_SIZE, inner)
82 /// Creates a new `BufReader<R>` with the specified buffer capacity.
86 /// Creating a buffer with ten bytes of capacity:
89 /// use std::io::BufReader;
90 /// use std::fs::File;
92 /// fn main() -> std::io::Result<()> {
93 /// let f = File::open("log.txt")?;
94 /// let reader = BufReader::with_capacity(10, f);
98 #[stable(feature = "rust1", since = "1.0.0")]
99 pub fn with_capacity(capacity: usize, inner: R) -> BufReader<R> {
101 let mut buffer = Vec::with_capacity(capacity);
// NOTE(review): the `unsafe { ... }` wrapper around the next two calls is
// on elided lines. `set_len(capacity)` exposes uninitialized memory, which
// `inner.initializer()` is then responsible for handling (zeroing, or
// skipping when the reader guarantees it never reads from the buffer).
102 buffer.set_len(capacity);
103 inner.initializer().initialize(&mut buffer);
// `pos: 0, cap: 0` — the buffer starts logically empty even though its
// backing storage is fully allocated.
104 BufReader { inner, buf: buffer.into_boxed_slice(), pos: 0, cap: 0 }
109 impl<R> BufReader<R> {
110 /// Gets a reference to the underlying reader.
112 /// It is inadvisable to directly read from the underlying reader.
117 /// use std::io::BufReader;
118 /// use std::fs::File;
120 /// fn main() -> std::io::Result<()> {
121 /// let f1 = File::open("log.txt")?;
122 /// let reader = BufReader::new(f1);
124 /// let f2 = reader.get_ref();
128 #[stable(feature = "rust1", since = "1.0.0")]
129 pub fn get_ref(&self) -> &R {
// Body elided from this view — presumably returns `&self.inner`.
133 /// Gets a mutable reference to the underlying reader.
135 /// It is inadvisable to directly read from the underlying reader.
140 /// use std::io::BufReader;
141 /// use std::fs::File;
143 /// fn main() -> std::io::Result<()> {
144 /// let f1 = File::open("log.txt")?;
145 /// let mut reader = BufReader::new(f1);
147 /// let f2 = reader.get_mut();
151 #[stable(feature = "rust1", since = "1.0.0")]
152 pub fn get_mut(&mut self) -> &mut R {
// Body elided from this view — presumably returns `&mut self.inner`.
156 /// Returns a reference to the internally buffered data.
158 /// Unlike `fill_buf`, this will not attempt to fill the buffer if it is empty.
163 /// use std::io::{BufReader, BufRead};
164 /// use std::fs::File;
166 /// fn main() -> std::io::Result<()> {
167 /// let f = File::open("log.txt")?;
168 /// let mut reader = BufReader::new(f);
169 /// assert!(reader.buffer().is_empty());
171 /// if reader.fill_buf()?.len() > 0 {
172 /// assert!(!reader.buffer().is_empty());
177 #[stable(feature = "bufreader_buffer", since = "1.37.0")]
178 pub fn buffer(&self) -> &[u8] {
// The valid, unconsumed portion of the internal buffer.
179 &self.buf[self.pos..self.cap]
182 /// Returns the number of bytes the internal buffer can hold at once.
187 /// use std::io::{BufReader, BufRead};
188 /// use std::fs::File;
190 /// fn main() -> std::io::Result<()> {
191 /// let f = File::open("log.txt")?;
192 /// let mut reader = BufReader::new(f);
194 /// let capacity = reader.capacity();
195 /// let buffer = reader.fill_buf()?;
196 /// assert!(buffer.len() <= capacity);
200 #[stable(feature = "buffered_io_capacity", since = "1.46.0")]
201 pub fn capacity(&self) -> usize {
// Body elided from this view — presumably `self.buf.len()` (the boxed
// slice is allocated at full capacity in `with_capacity`).
205 /// Unwraps this `BufReader<R>`, returning the underlying reader.
207 /// Note that any leftover data in the internal buffer is lost. Therefore,
208 /// a following read from the underlying reader may lead to data loss.
213 /// use std::io::BufReader;
214 /// use std::fs::File;
216 /// fn main() -> std::io::Result<()> {
217 /// let f1 = File::open("log.txt")?;
218 /// let reader = BufReader::new(f1);
220 /// let f2 = reader.into_inner();
224 #[stable(feature = "rust1", since = "1.0.0")]
225 pub fn into_inner(self) -> R {
// Body elided from this view — consumes self and returns the inner reader.
229 /// Invalidates all data in the internal buffer.
231 fn discard_buffer(&mut self) {
// Body elided from this view. Callers (`read`, `seek`) rely on this
// resetting `pos`/`cap` so that `buffer()` becomes empty — confirm
// against the full source.
237 impl<R: Seek> BufReader<R> {
238 /// Seeks relative to the current position. If the new position lies within the buffer,
239 /// the buffer will not be flushed, allowing for more efficient seeks.
240 /// This method does not return the location of the underlying reader, so the caller
241 /// must track this information themselves if it is required.
242 #[unstable(feature = "bufreader_seek_relative", issue = "31100")]
243 pub fn seek_relative(&mut self, offset: i64) -> io::Result<()> {
244 let pos = self.pos as u64;
// NOTE(review): the branch guard separating the negative-offset case from
// the non-negative case (presumably `if offset < 0 { ... } else ...`) is
// on elided lines — confirm against the full source.
// Negative offset: move the read cursor backwards within the buffer if it
// doesn't underflow the start of the buffered region.
246 if let Some(new_pos) = pos.checked_sub((-offset) as u64) {
247 self.pos = new_pos as usize;
// Non-negative offset: move forwards, but only if the target still lies
// inside the valid buffered region (`<= cap`).
251 if let Some(new_pos) = pos.checked_add(offset as u64) {
252 if new_pos <= self.cap as u64 {
253 self.pos = new_pos as usize;
// Target falls outside the buffer: fall back to a real seek, which
// discards the buffer (see the `Seek` impl below).
258 self.seek(SeekFrom::Current(offset)).map(drop)
262 #[stable(feature = "rust1", since = "1.0.0")]
263 impl<R: Read> Read for BufReader<R> {
264 fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
265 // If we don't have any buffered data and we're doing a massive read
266 // (larger than our internal buffer), bypass our internal buffer
268 if self.pos == self.cap && buf.len() >= self.buf.len() {
269 self.discard_buffer();
270 return self.inner.read(buf);
273 let mut rem = self.fill_buf()?;
280 fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
281 let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
282 if self.pos == self.cap && total_len >= self.buf.len() {
283 self.discard_buffer();
284 return self.inner.read_vectored(bufs);
287 let mut rem = self.fill_buf()?;
288 rem.read_vectored(bufs)?
294 fn is_read_vectored(&self) -> bool {
295 self.inner.is_read_vectored()
298 // we can't skip unconditionally because of the large buffer case in read.
299 unsafe fn initializer(&self) -> Initializer {
300 self.inner.initializer()
304 #[stable(feature = "rust1", since = "1.0.0")]
305 impl<R: Read> BufRead for BufReader<R> {
306 fn fill_buf(&mut self) -> io::Result<&[u8]> {
307 // If we've reached the end of our internal buffer then we need to fetch
308 // some more data from the underlying reader.
309 // Branch using `>=` instead of the more correct `==`
310 // to tell the compiler that the pos..cap slice is always valid.
311 if self.pos >= self.cap {
312 debug_assert!(self.pos == self.cap);
313 self.cap = self.inner.read(&mut self.buf)?;
316 Ok(&self.buf[self.pos..self.cap])
319 fn consume(&mut self, amt: usize) {
320 self.pos = cmp::min(self.pos + amt, self.cap);
324 #[stable(feature = "rust1", since = "1.0.0")]
325 impl<R> fmt::Debug for BufReader<R>
329 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
330 fmt.debug_struct("BufReader")
331 .field("reader", &self.inner)
332 .field("buffer", &format_args!("{}/{}", self.cap - self.pos, self.buf.len()))
337 #[stable(feature = "rust1", since = "1.0.0")]
338 impl<R: Seek> Seek for BufReader<R> {
339 /// Seek to an offset, in bytes, in the underlying reader.
341 /// The position used for seeking with `SeekFrom::Current(_)` is the
342 /// position the underlying reader would be at if the `BufReader<R>` had no
345 /// Seeking always discards the internal buffer, even if the seek position
346 /// would otherwise fall within it. This guarantees that calling
347 /// `.into_inner()` immediately after a seek yields the underlying reader
348 /// at the same position.
350 /// To seek without discarding the internal buffer, use [`BufReader::seek_relative`].
352 /// See [`std::io::Seek`] for more details.
354 /// Note: In the edge case where you're seeking with `SeekFrom::Current(n)`
355 /// where `n` minus the internal buffer length overflows an `i64`, two
356 /// seeks will be performed instead of one. If the second seek returns
357 /// `Err`, the underlying reader will be left at the same position it would
358 /// have if you called `seek` with `SeekFrom::Current(0)`.
360 /// [`BufReader::seek_relative`]: struct.BufReader.html#method.seek_relative
361 /// [`std::io::Seek`]: trait.Seek.html
362 fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
364 if let SeekFrom::Current(n) = pos {
365 let remainder = (self.cap - self.pos) as i64;
366 // it should be safe to assume that remainder fits within an i64 as the alternative
367 // means we managed to allocate 8 exbibytes and that's absurd.
368 // But it's not out of the realm of possibility for some weird underlying reader to
369 // support seeking by i64::MIN so we need to handle underflow when subtracting
371 if let Some(offset) = n.checked_sub(remainder) {
372 result = self.inner.seek(SeekFrom::Current(offset))?;
374 // seek backwards by our remainder, and then by the offset
375 self.inner.seek(SeekFrom::Current(-remainder))?;
376 self.discard_buffer();
377 result = self.inner.seek(SeekFrom::Current(n))?;
380 // Seeking with Start/End doesn't care about our buffer length.
381 result = self.inner.seek(pos)?;
383 self.discard_buffer();
388 /// Wraps a writer and buffers its output.
390 /// It can be excessively inefficient to work directly with something that
391 /// implements [`Write`]. For example, every call to
392 /// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A
393 /// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying
394 /// writer in large, infrequent batches.
396 /// `BufWriter<W>` can improve the speed of programs that make *small* and
397 /// *repeated* write calls to the same file or network socket. It does not
398 /// help when writing very large amounts at once, or writing just one or a few
399 /// times. It also provides no advantage when writing to a destination that is
400 /// in memory, like a `Vec<u8>`.
402 /// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though
403 /// dropping will attempt to flush the contents of the buffer, any errors
404 /// that happen in the process of dropping will be ignored. Calling [`flush`]
405 /// ensures that the buffer is empty and thus dropping will not even attempt
410 /// Let's write the numbers one through ten to a [`TcpStream`]:
413 /// use std::io::prelude::*;
414 /// use std::net::TcpStream;
416 /// let mut stream = TcpStream::connect("127.0.0.1:34254").unwrap();
419 /// stream.write(&[i+1]).unwrap();
423 /// Because we're not buffering, we write each one in turn, incurring the
424 /// overhead of a system call per byte written. We can fix this with a
428 /// use std::io::prelude::*;
429 /// use std::io::BufWriter;
430 /// use std::net::TcpStream;
432 /// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
435 /// stream.write(&[i+1]).unwrap();
437 /// stream.flush().unwrap();
440 /// By wrapping the stream with a `BufWriter<W>`, these ten writes are all grouped
441 /// together by the buffer and will all be written out in one system call when
442 /// the `stream` is flushed.
444 /// [`Write`]: ../../std/io/trait.Write.html
445 /// [`TcpStream::write`]: ../../std/net/struct.TcpStream.html#method.write
446 /// [`TcpStream`]: ../../std/net/struct.TcpStream.html
447 /// [`flush`]: #method.flush
448 #[stable(feature = "rust1", since = "1.0.0")]
449 pub struct BufWriter<W: Write> {
// NOTE(review): field declarations are partly on elided lines. Usage in
// this file shows: `inner: Option<W>` (taken on `into_inner`), a
// `buf: Vec<u8>` staging buffer, and the `panicked: bool` flag below.
452 // #30888: If the inner writer panics in a call to write, we don't want to
453 // write the buffered data a second time in BufWriter's destructor. This
454 // flag tells the Drop impl if it should skip the flush.
458 /// An error returned by `into_inner` which combines an error that
459 /// happened while writing out the buffer, and the buffered writer object
460 /// which may be used to recover from the condition.
465 /// use std::io::BufWriter;
466 /// use std::net::TcpStream;
468 /// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
470 /// // do stuff with the stream
472 /// // we want to get our `TcpStream` back, so let's try:
474 /// let stream = match stream.into_inner() {
477 /// // Here, e is an IntoInnerError
478 /// panic!("An error occurred");
483 #[stable(feature = "rust1", since = "1.0.0")]
// Tuple struct: field 0 is the recovered writer, field 1 the I/O error
// that aborted the flush.
484 pub struct IntoInnerError<W>(W, Error);
486 impl<W: Write> BufWriter<W> {
487 /// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KB,
488 /// but may change in the future.
493 /// use std::io::BufWriter;
494 /// use std::net::TcpStream;
496 /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
498 #[stable(feature = "rust1", since = "1.0.0")]
499 pub fn new(inner: W) -> BufWriter<W> {
500 BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
503 /// Creates a new `BufWriter<W>` with the specified buffer capacity.
507 /// Creating a buffer with a hundred bytes of capacity:
510 /// use std::io::BufWriter;
511 /// use std::net::TcpStream;
513 /// let stream = TcpStream::connect("127.0.0.1:34254").unwrap();
514 /// let mut buffer = BufWriter::with_capacity(100, stream);
516 #[stable(feature = "rust1", since = "1.0.0")]
517 pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> {
518 BufWriter { inner: Some(inner), buf: Vec::with_capacity(capacity), panicked: false }
521 /// Send data in our local buffer into the inner writer, looping as
522 /// necessary until either it's all been sent or an error occurs.
524 /// Because all the data in the buffer has been reported to our owner as
525 /// "successfully written" (by returning nonzero success values from
526 /// `write`), any 0-length writes from `inner` must be reported as i/o
527 /// errors from this method.
528 fn flush_buf(&mut self) -> io::Result<()> {
529 /// Helper struct to ensure the buffer is updated after all the writes
530 /// are complete. It tracks the number of written bytes and drains them
531 /// all from the front of the buffer when dropped.
532 struct BufGuard<'a> {
533 buffer: &'a mut Vec<u8>,
// (`written: usize` field is on an elided line.)
537 impl<'a> BufGuard<'a> {
538 fn new(buffer: &'a mut Vec<u8>) -> Self {
539 Self { buffer, written: 0 }
542 /// The unwritten part of the buffer
543 fn remaining(&self) -> &[u8] {
544 &self.buffer[self.written..]
547 /// Flag some bytes as removed from the front of the buffer
548 fn consume(&mut self, amt: usize) {
// (advances `self.written` — increment is on an elided line.)
552 /// true if all of the bytes have been written
553 fn done(&self) -> bool {
554 self.written >= self.buffer.len()
// The Drop impl makes the drain happen even on the early-return error
// paths below, so successfully-written bytes are never re-sent.
558 impl Drop for BufGuard<'_> {
560 if self.written > 0 {
561 self.buffer.drain(..self.written);
566 let mut guard = BufGuard::new(&mut self.buf);
567 let inner = self.inner.as_mut().unwrap();
568 while !guard.done() {
// Bracket the inner write with the panic flag (see #30888 note on the
// struct): if `inner.write` panics, Drop must not flush again.
569 self.panicked = true;
570 let r = inner.write(guard.remaining());
571 self.panicked = false;
// (match on `r` begins on an elided line; `Ok(0)` arm follows.)
575 return Err(Error::new(
576 ErrorKind::WriteZero,
577 "failed to write the buffered data",
580 Ok(n) => guard.consume(n),
// Interrupted writes are retried; any other error aborts the flush.
581 Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
582 Err(e) => return Err(e),
588 /// Buffer some data without flushing it, regardless of the size of the
589 /// data. Writes as much as possible without exceeding capacity. Returns
590 /// the number of bytes written.
591 fn write_to_buf(&mut self, buf: &[u8]) -> usize {
592 let available = self.buf.capacity() - self.buf.len();
593 let amt_to_buffer = available.min(buf.len());
594 self.buf.extend_from_slice(&buf[..amt_to_buffer]);
// (returns `amt_to_buffer` — on an elided line.)
598 /// Gets a reference to the underlying writer.
603 /// use std::io::BufWriter;
604 /// use std::net::TcpStream;
606 /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
608 /// // we can use reference just like buffer
609 /// let reference = buffer.get_ref();
611 #[stable(feature = "rust1", since = "1.0.0")]
612 pub fn get_ref(&self) -> &W {
// `inner` is only `None` after `into_inner`, which consumes self, so this
// unwrap cannot fail for a live BufWriter.
613 self.inner.as_ref().unwrap()
616 /// Gets a mutable reference to the underlying writer.
618 /// It is inadvisable to directly write to the underlying writer.
623 /// use std::io::BufWriter;
624 /// use std::net::TcpStream;
626 /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
628 /// // we can use reference just like buffer
629 /// let reference = buffer.get_mut();
631 #[stable(feature = "rust1", since = "1.0.0")]
632 pub fn get_mut(&mut self) -> &mut W {
633 self.inner.as_mut().unwrap()
636 /// Returns a reference to the internally buffered data.
641 /// use std::io::BufWriter;
642 /// use std::net::TcpStream;
644 /// let buf_writer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
646 /// // See how many bytes are currently buffered
647 /// let bytes_buffered = buf_writer.buffer().len();
649 #[stable(feature = "bufreader_buffer", since = "1.37.0")]
650 pub fn buffer(&self) -> &[u8] {
// Body elided from this view — presumably `&self.buf`.
654 /// Returns the number of bytes the internal buffer can hold without flushing.
659 /// use std::io::BufWriter;
660 /// use std::net::TcpStream;
662 /// let buf_writer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
664 /// // Check the capacity of the inner buffer
665 /// let capacity = buf_writer.capacity();
666 /// // Calculate how many bytes can be written without flushing
667 /// let without_flush = capacity - buf_writer.buffer().len();
669 #[stable(feature = "buffered_io_capacity", since = "1.46.0")]
670 pub fn capacity(&self) -> usize {
// Body elided from this view — presumably `self.buf.capacity()`.
674 /// Unwraps this `BufWriter<W>`, returning the underlying writer.
676 /// The buffer is written out before returning the writer.
680 /// An `Err` will be returned if an error occurs while flushing the buffer.
685 /// use std::io::BufWriter;
686 /// use std::net::TcpStream;
688 /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
690 /// // unwrap the TcpStream and flush the buffer
691 /// let stream = buffer.into_inner().unwrap();
693 #[stable(feature = "rust1", since = "1.0.0")]
694 pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> {
695 match self.flush_buf() {
// On flush failure, hand the whole BufWriter back so the caller can
// recover the writer and the unflushed data.
696 Err(e) => Err(IntoInnerError(self, e)),
// `take()` leaves `inner` as None so the Drop impl skips its flush.
697 Ok(()) => Ok(self.inner.take().unwrap()),
702 #[stable(feature = "rust1", since = "1.0.0")]
703 impl<W: Write> Write for BufWriter<W> {
704 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
705 if self.buf.len() + buf.len() > self.buf.capacity() {
708 // FIXME: Why no len > capacity? Why not buffer len == capacity? #72919
709 if buf.len() >= self.buf.capacity() {
710 self.panicked = true;
711 let r = self.get_mut().write(buf);
712 self.panicked = false;
715 self.buf.extend_from_slice(buf);
720 fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
721 // Normally, `write_all` just calls `write` in a loop. We can do better
722 // by calling `self.get_mut().write_all()` directly, which avoids
723 // round trips through the buffer in the event of a series of partial
724 // writes in some circumstances.
725 if self.buf.len() + buf.len() > self.buf.capacity() {
728 // FIXME: Why no len > capacity? Why not buffer len == capacity? #72919
729 if buf.len() >= self.buf.capacity() {
730 self.panicked = true;
731 let r = self.get_mut().write_all(buf);
732 self.panicked = false;
735 self.buf.extend_from_slice(buf);
740 fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
741 let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
742 if self.buf.len() + total_len > self.buf.capacity() {
745 // FIXME: Why no len > capacity? Why not buffer len == capacity? #72919
746 if total_len >= self.buf.capacity() {
747 self.panicked = true;
748 let r = self.get_mut().write_vectored(bufs);
749 self.panicked = false;
752 bufs.iter().for_each(|b| self.buf.extend_from_slice(b));
757 fn is_write_vectored(&self) -> bool {
758 self.get_ref().is_write_vectored()
761 fn flush(&mut self) -> io::Result<()> {
762 self.flush_buf().and_then(|()| self.get_mut().flush())
766 #[stable(feature = "rust1", since = "1.0.0")]
767 impl<W: Write> fmt::Debug for BufWriter<W>
771 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
772 fmt.debug_struct("BufWriter")
773 .field("writer", &self.inner.as_ref().unwrap())
774 .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
779 #[stable(feature = "rust1", since = "1.0.0")]
780 impl<W: Write + Seek> Seek for BufWriter<W> {
781 /// Seek to the offset, in bytes, in the underlying writer.
783 /// Seeking always writes out the internal buffer before seeking.
784 fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
786 self.get_mut().seek(pos)
790 #[stable(feature = "rust1", since = "1.0.0")]
791 impl<W: Write> Drop for BufWriter<W> {
793 if self.inner.is_some() && !self.panicked {
794 // dtors should not panic, so we ignore a failed flush
795 let _r = self.flush_buf();
800 impl<W> IntoInnerError<W> {
801 /// Returns the error which caused the call to `into_inner()` to fail.
803 /// This error was returned when attempting to write the internal buffer.
808 /// use std::io::BufWriter;
809 /// use std::net::TcpStream;
811 /// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
813 /// // do stuff with the stream
815 /// // we want to get our `TcpStream` back, so let's try:
817 /// let stream = match stream.into_inner() {
820 /// // Here, e is an IntoInnerError, let's log the inner error.
822 /// // We'll just 'log' to stdout for this example.
823 /// println!("{}", e.error());
825 /// panic!("An unexpected error occurred.");
829 #[stable(feature = "rust1", since = "1.0.0")]
830 pub fn error(&self) -> &Error {
// Body elided from this view — presumably returns `&self.1` (the stored
// I/O error).
834 /// Returns the buffered writer instance which generated the error.
836 /// The returned object can be used for error recovery, such as
837 /// re-inspecting the buffer.
842 /// use std::io::BufWriter;
843 /// use std::net::TcpStream;
845 /// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap());
847 /// // do stuff with the stream
849 /// // we want to get our `TcpStream` back, so let's try:
851 /// let stream = match stream.into_inner() {
854 /// // Here, e is an IntoInnerError, let's re-examine the buffer:
855 /// let buffer = e.into_inner();
857 /// // do stuff to try to recover
859 /// // afterwards, let's just return the stream
860 /// buffer.into_inner().unwrap()
864 #[stable(feature = "rust1", since = "1.0.0")]
865 pub fn into_inner(self) -> W {
// Body elided from this view — presumably returns `self.0` (the writer).
870 #[stable(feature = "rust1", since = "1.0.0")]
871 impl<W> From<IntoInnerError<W>> for Error {
872 fn from(iie: IntoInnerError<W>) -> Error {
877 #[stable(feature = "rust1", since = "1.0.0")]
878 impl<W: Send + fmt::Debug> error::Error for IntoInnerError<W> {
879 #[allow(deprecated, deprecated_in_future)]
880 fn description(&self) -> &str {
881 error::Error::description(self.error())
885 #[stable(feature = "rust1", since = "1.0.0")]
886 impl<W> fmt::Display for IntoInnerError<W> {
887 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
892 /// Private helper struct for implementing the line-buffered writing logic.
893 /// This shim temporarily wraps a BufWriter, and uses its internals to
894 /// implement a line-buffered writer (specifically by using the internal
895 /// methods like write_to_buf and flush_buf). In this way, a more
896 /// efficient abstraction can be created than one that only had access to
897 /// `write` and `flush`, without needlessly duplicating a lot of the
898 /// implementation details of BufWriter. This also allows existing
899 /// `BufWriters` to be temporarily given line-buffering logic; this is what
900 /// enables Stdout to be alternately in line-buffered or block-buffered mode.
902 pub(super) struct LineWriterShim<'a, W: Write> {
// Borrows the BufWriter for the duration of the line-buffered operation;
// any further fields are on elided lines.
903 buffer: &'a mut BufWriter<W>,
906 impl<'a, W: Write> LineWriterShim<'a, W> {
907 pub fn new(buffer: &'a mut BufWriter<W>) -> Self {
// Body elided from this view — presumably `Self { buffer }`.
911 /// Get a mutable reference to the inner writer (that is, the writer
912 /// wrapped by the BufWriter). Be careful with this writer, as writes to
913 /// it will bypass the buffer.
914 fn inner_mut(&mut self) -> &mut W {
915 self.buffer.get_mut()
918 /// Get the content currently buffered in self.buffer
919 fn buffered(&self) -> &[u8] {
// Body elided from this view — presumably `self.buffer.buffer()`.
923 /// Flush the buffer iff the last byte is a newline (indicating that an
924 /// earlier write only succeeded partially, and we want to retry flushing
925 /// the buffered line before continuing with a subsequent write)
926 fn flush_if_completed_line(&mut self) -> io::Result<()> {
927 match self.buffered().last().copied() {
928 Some(b'\n') => self.buffer.flush_buf(),
// (the `_ => Ok(())` no-op arm is on an elided line.)
934 impl<'a, W: Write> Write for LineWriterShim<'a, W> {
935 /// Write some data into this BufReader with line buffering. This means
936 /// that, if any newlines are present in the data, the data up to the last
937 /// newline is sent directly to the underlying writer, and data after it
938 /// is buffered. Returns the number of bytes written.
940 /// This function operates on a "best effort basis"; in keeping with the
941 /// convention of `Write::write`, it makes at most one attempt to write
942 /// new data to the underlying writer. If that write only reports a partial
943 /// success, the remaining data will be buffered.
945 /// Because this function attempts to send completed lines to the underlying
946 /// writer, it will also flush the existing buffer if it ends with a
947 /// newline, even if the incoming data does not contain any newlines.
948 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
949 let newline_idx = match memchr::memrchr(b'\n', buf) {
950 // If there are no new newlines (that is, if this write is less than
951 // one line), just do a regular buffered write (which may flush if
952 // we exceed the inner buffer's size)
// (the `None =>` arm opener is on an elided line.)
954 self.flush_if_completed_line()?;
955 return self.buffer.write(buf);
957 // Otherwise, arrange for the lines to be written directly to the
// inner writer: `newline_idx + 1` includes the final newline itself.
959 Some(newline_idx) => newline_idx + 1,
962 // Flush existing content to prepare for our write. We have to do this
963 // before attempting to write `buf` in order to maintain consistency;
964 // if we add `buf` to the buffer then try to flush it all at once,
965 // we're obligated to return Ok(), which would mean suppressing any
966 // errors that occur during flush.
967 self.buffer.flush_buf()?;
969 // This is what we're going to try to write directly to the inner
970 // writer. The rest will be buffered, if nothing goes wrong.
971 let lines = &buf[..newline_idx];
973 // Write `lines` directly to the inner writer. In keeping with the
974 // `write` convention, make at most one attempt to add new (unbuffered)
975 // data. Because this write doesn't touch the BufWriter state directly,
976 // and the buffer is known to be empty, we don't need to worry about
977 // self.buffer.panicked here.
978 let flushed = self.inner_mut().write(lines)?;
980 // If buffer returns Ok(0), propagate that to the caller without
981 // doing additional buffering; otherwise we're just guaranteeing
982 // an "ErrorKind::WriteZero" later.
// (the `if flushed == 0 { return Ok(0); }` early return is presumably on
// elided lines — confirm against the full source.)
987 // Now that the write has succeeded, buffer the rest (or as much of
988 // the rest as possible). If there were any unwritten newlines, we
989 // only buffer out to the last unwritten newline that fits in the
990 // buffer; this helps prevent flushing partial lines on subsequent
991 // calls to LineWriterShim::write.
993 // Handle the cases in order of most-common to least-common, under
994 // the presumption that most writes succeed in totality, and that most
995 // writes are smaller than the buffer.
996 // - Is this a partial line (ie, no newlines left in the unwritten tail)
997 // - If not, does the data out to the last unwritten newline fit in
999 // - If not, scan for the last newline that *does* fit in the buffer
1000 let tail = if flushed >= newline_idx {
// (partial-line arm body — `&buf[flushed..]` presumably — is elided.)
1002 } else if newline_idx - flushed <= self.buffer.capacity() {
1003 &buf[flushed..newline_idx]
// (the `else` opener is on an elided line.)
1005 let scan_area = &buf[flushed..];
1006 let scan_area = &scan_area[..self.buffer.capacity()];
1007 match memchr::memrchr(b'\n', scan_area) {
1008 Some(newline_idx) => &scan_area[..newline_idx + 1],
// (the `None => scan_area` arm is on an elided line.)
// Report both the directly-written and the buffered byte counts.
1013 let buffered = self.buffer.write_to_buf(tail);
1014 Ok(flushed + buffered)
1017 fn flush(&mut self) -> io::Result<()> {
1021 /// Write some vectored data into this BufReader with line buffering. This
1022 /// means that, if any newlines are present in the data, the data up to
1023 /// and including the buffer containing the last newline is sent directly
1024 /// to the inner writer, and the data after it is buffered. Returns the
1025 /// number of bytes written.
1027 /// This function operates on a "best effort basis"; in keeping with the
1028 /// convention of `Write::write`, it makes at most one attempt to write
1029 /// new data to the underlying writer.
1031 /// Because this function attempts to send completed lines to the underlying
1032 /// writer, it will also flush the existing buffer if it contains any
1035 /// Because sorting through an array of `IoSlice` can be a bit convoluted,
1036 /// This method differs from write in the following ways:
1038 /// - It attempts to write the full content of all the buffers up to and
1039 /// including the one containing the last newline. This means that it
1040 /// may attempt to write a partial line, that buffer has data past the
1042 /// - If the write only reports partial success, it does not attempt to
1043 /// find the precise location of the written bytes and buffer the rest.
1045 /// If the underlying vector doesn't support vectored writing, we instead
1046 /// simply write the first non-empty buffer with `write`. This way, we
1047 /// get the benefits of more granular partial-line handling without losing
1048 /// anything in efficiency
1049 fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
1050 // If there's no specialized behavior for write_vectored, just use
1051 // write. This has the benefit of more granular partial-line handling.
1052 if !self.is_write_vectored() {
1053 return match bufs.iter().find(|buf| !buf.is_empty()) {
1054 Some(buf) => self.write(buf),
// (the all-empty `None => Ok(0)` arm is presumably on an elided line.)
1059 // Find the buffer containing the last newline
// (the intervening `.iter().enumerate().rev()` adaptors are on elided
// lines; `rev()` is what makes this the *last* newline-bearing buffer.)
1060 let last_newline_buf_idx = bufs
1064 .find_map(|(i, buf)| memchr::memchr(b'\n', buf).map(|_| i));
1066 // If there are no new newlines (that is, if this write is less than
1067 // one line), just do a regular buffered write
1068 let last_newline_buf_idx = match last_newline_buf_idx {
1069 // No newlines; just do a normal buffered write
// (the `None =>` arm opener is on an elided line.)
1071 self.flush_if_completed_line()?;
1072 return self.buffer.write_vectored(bufs);
1077 // Flush existing content to prepare for our write
1078 self.buffer.flush_buf()?;
1080 // This is what we're going to try to write directly to the inner
1081 // writer. The rest will be buffered, if nothing goes wrong.
1082 let (lines, tail) = bufs.split_at(last_newline_buf_idx + 1);
1084 // Write `lines` directly to the inner writer. In keeping with the
1085 // `write` convention, make at most one attempt to add new (unbuffered)
1086 // data. Because this write doesn't touch the BufWriter state directly,
1087 // and the buffer is known to be empty, we don't need to worry about
1088 // self.panicked here.
1089 let flushed = self.inner_mut().write_vectored(lines)?;
1091 // If inner returns Ok(0), propagate that to the caller without
1092 // doing additional buffering; otherwise we're just guaranteeing
1093 // an "ErrorKind::WriteZero" later.
1098 // Don't try to reconstruct the exact amount written; just bail
1099 // in the event of a partial write
1100 let lines_len = lines.iter().map(|buf| buf.len()).sum();
1101 if flushed < lines_len {
// (early `return Ok(flushed)` presumably on an elided line.)
1105 // Now that the write has succeeded, buffer the rest (or as much of the
1106 // rest as possible)
1107 let buffered: usize = tail
// `take_while(n > 0)` stops buffering once the staging buffer is full, so
// the sum counts only bytes actually accepted.
1109 .filter(|buf| !buf.is_empty())
1110 .map(|buf| self.buffer.write_to_buf(buf))
1111 .take_while(|&n| n > 0)
1114 Ok(flushed + buffered)
1117 fn is_write_vectored(&self) -> bool {
// Defer to the BufWriter (which in turn asks the inner writer).
1118 self.buffer.is_write_vectored()
1121     /// Write some data into this BufWriter with line buffering. This means
1122 /// that, if any newlines are present in the data, the data up to the last
1123 /// newline is sent directly to the underlying writer, and data after it
1126 /// Because this function attempts to send completed lines to the underlying
1127 /// writer, it will also flush the existing buffer if it contains any
1128 /// newlines, even if the incoming data does not contain any newlines.
1129 fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
1130 match memchr::memrchr(b'\n', buf) {
1131             // If there are no newlines (that is, if this write is less than
1132             // one line), just do a regular buffered write (which may flush if
1133 // we exceed the inner buffer's size)
1135 self.flush_if_completed_line()?;
1136 self.buffer.write_all(buf)
1138 Some(newline_idx) => {
1139 let (lines, tail) = buf.split_at(newline_idx + 1);
1141 if self.buffered().is_empty() {
1142 self.inner_mut().write_all(lines)?;
1144 // If there is any buffered data, we add the incoming lines
1145 // to that buffer before flushing, which saves us at least
1146 // one write call. We can't really do this with `write`,
1147 // since we can't do this *and* not suppress errors *and*
1148 // report a consistent state to the caller in a return
1149 // value, but here in write_all it's fine.
1150 self.buffer.write_all(lines)?;
1151 self.buffer.flush_buf()?;
1154 self.buffer.write_all(tail)
1160 /// Wraps a writer and buffers output to it, flushing whenever a newline
1161 /// (`0x0a`, `'\n'`) is detected.
1163 /// The [`BufWriter`][bufwriter] struct wraps a writer and buffers its output.
1164 /// But it only does this batched write when it goes out of scope, or when the
1165 /// internal buffer is full. Sometimes, you'd prefer to write each line as it's
1166 /// completed, rather than the entire buffer at once. Enter `LineWriter`. It
1167 /// does exactly that.
1169 /// Like [`BufWriter`][bufwriter], a `LineWriter`’s buffer will also be flushed when the
1170 /// `LineWriter` goes out of scope or when its internal buffer is full.
1172 /// [bufwriter]: struct.BufWriter.html
1174 /// If there's still a partial line in the buffer when the `LineWriter` is
1175 /// dropped, it will flush those contents.
1179 /// We can use `LineWriter` to write one line at a time, significantly
1180 /// reducing the number of actual writes to the file.
1183 /// use std::fs::{self, File};
1184 /// use std::io::prelude::*;
1185 /// use std::io::LineWriter;
1187 /// fn main() -> std::io::Result<()> {
1188 /// let road_not_taken = b"I shall be telling this with a sigh
1189 /// Somewhere ages and ages hence:
1190 /// Two roads diverged in a wood, and I -
1191 /// I took the one less traveled by,
1192 /// And that has made all the difference.";
1194 /// let file = File::create("poem.txt")?;
1195 /// let mut file = LineWriter::new(file);
1197 /// file.write_all(b"I shall be telling this with a sigh")?;
1199 /// // No bytes are written until a newline is encountered (or
1200 /// // the internal buffer is filled).
1201 /// assert_eq!(fs::read_to_string("poem.txt")?, "");
1202 /// file.write_all(b"\n")?;
1204 /// fs::read_to_string("poem.txt")?,
1205 /// "I shall be telling this with a sigh\n",
1208 /// // Write the rest of the poem.
1209 /// file.write_all(b"Somewhere ages and ages hence:
1210 /// Two roads diverged in a wood, and I -
1211 /// I took the one less traveled by,
1212 /// And that has made all the difference.")?;
1214 /// // The last line of the poem doesn't end in a newline, so
1215 /// // we have to flush or drop the `LineWriter` to finish
1219 /// // Confirm the whole poem was written.
1220 /// assert_eq!(fs::read("poem.txt")?, &road_not_taken[..]);
1224 #[stable(feature = "rust1", since = "1.0.0")]
1225 pub struct LineWriter<W: Write> {
1226 inner: BufWriter<W>,
1229 impl<W: Write> LineWriter<W> {
1230 /// Creates a new `LineWriter`.
1235 /// use std::fs::File;
1236 /// use std::io::LineWriter;
1238 /// fn main() -> std::io::Result<()> {
1239 /// let file = File::create("poem.txt")?;
1240 /// let file = LineWriter::new(file);
1244 #[stable(feature = "rust1", since = "1.0.0")]
1245 pub fn new(inner: W) -> LineWriter<W> {
1246 // Lines typically aren't that long, don't use a giant buffer
1247 LineWriter::with_capacity(1024, inner)
1250 /// Creates a new `LineWriter` with a specified capacity for the internal
1256 /// use std::fs::File;
1257 /// use std::io::LineWriter;
1259 /// fn main() -> std::io::Result<()> {
1260 /// let file = File::create("poem.txt")?;
1261 /// let file = LineWriter::with_capacity(100, file);
1265 #[stable(feature = "rust1", since = "1.0.0")]
1266 pub fn with_capacity(capacity: usize, inner: W) -> LineWriter<W> {
1267 LineWriter { inner: BufWriter::with_capacity(capacity, inner) }
1270 /// Gets a reference to the underlying writer.
1275 /// use std::fs::File;
1276 /// use std::io::LineWriter;
1278 /// fn main() -> std::io::Result<()> {
1279 /// let file = File::create("poem.txt")?;
1280 /// let file = LineWriter::new(file);
1282 /// let reference = file.get_ref();
1286 #[stable(feature = "rust1", since = "1.0.0")]
1287 pub fn get_ref(&self) -> &W {
1288 self.inner.get_ref()
1291 /// Gets a mutable reference to the underlying writer.
1293 /// Caution must be taken when calling methods on the mutable reference
1294 /// returned as extra writes could corrupt the output stream.
1299 /// use std::fs::File;
1300 /// use std::io::LineWriter;
1302 /// fn main() -> std::io::Result<()> {
1303 /// let file = File::create("poem.txt")?;
1304 /// let mut file = LineWriter::new(file);
1306 /// // we can use reference just like file
1307 /// let reference = file.get_mut();
1311 #[stable(feature = "rust1", since = "1.0.0")]
1312 pub fn get_mut(&mut self) -> &mut W {
1313 self.inner.get_mut()
1316 /// Unwraps this `LineWriter`, returning the underlying writer.
1318 /// The internal buffer is written out before returning the writer.
1322 /// An `Err` will be returned if an error occurs while flushing the buffer.
1327 /// use std::fs::File;
1328 /// use std::io::LineWriter;
1330 /// fn main() -> std::io::Result<()> {
1331 /// let file = File::create("poem.txt")?;
1333 /// let writer: LineWriter<File> = LineWriter::new(file);
1335 /// let file: File = writer.into_inner()?;
1339 #[stable(feature = "rust1", since = "1.0.0")]
1340 pub fn into_inner(self) -> Result<W, IntoInnerError<LineWriter<W>>> {
1343 .map_err(|IntoInnerError(buf, e)| IntoInnerError(LineWriter { inner: buf }, e))
1347 #[stable(feature = "rust1", since = "1.0.0")]
1348 impl<W: Write> Write for LineWriter<W> {
1349 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
1350 LineWriterShim::new(&mut self.inner).write(buf)
1353 fn flush(&mut self) -> io::Result<()> {
1357 fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
1358 LineWriterShim::new(&mut self.inner).write_vectored(bufs)
1361 fn is_write_vectored(&self) -> bool {
1362 self.inner.is_write_vectored()
1365 fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
1366 LineWriterShim::new(&mut self.inner).write_all(buf)
1369 fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
1370 LineWriterShim::new(&mut self.inner).write_all_vectored(bufs)
1373 fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> {
1374 LineWriterShim::new(&mut self.inner).write_fmt(fmt)
1378 #[stable(feature = "rust1", since = "1.0.0")]
1379 impl<W: Write> fmt::Debug for LineWriter<W>
1383 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1384 fmt.debug_struct("LineWriter")
1385 .field("writer", &self.inner.inner)
1388 &format_args!("{}/{}", self.inner.buf.len(), self.inner.buf.capacity()),
1396 use crate::io::prelude::*;
1397 use crate::io::{self, BufReader, BufWriter, ErrorKind, IoSlice, LineWriter, SeekFrom};
1398 use crate::sync::atomic::{AtomicUsize, Ordering};
1401 /// A dummy reader intended at testing short-reads propagation.
1402 pub struct ShortReader {
1403 lengths: Vec<usize>,
1406 // FIXME: rustfmt and tidy disagree about the correct formatting of this
1407 // function. This leads to issues for users with editors configured to
1409 impl Read for ShortReader {
1410 fn read(&mut self, _: &mut [u8]) -> io::Result<usize> {
1411 if self.lengths.is_empty() {
1414 Ok(self.lengths.remove(0))
1420 fn test_buffered_reader() {
1421 let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
1422 let mut reader = BufReader::with_capacity(2, inner);
1424 let mut buf = [0, 0, 0];
1425 let nread = reader.read(&mut buf);
1426 assert_eq!(nread.unwrap(), 3);
1427 assert_eq!(buf, [5, 6, 7]);
1428 assert_eq!(reader.buffer(), []);
1430 let mut buf = [0, 0];
1431 let nread = reader.read(&mut buf);
1432 assert_eq!(nread.unwrap(), 2);
1433 assert_eq!(buf, [0, 1]);
1434 assert_eq!(reader.buffer(), []);
1437 let nread = reader.read(&mut buf);
1438 assert_eq!(nread.unwrap(), 1);
1439 assert_eq!(buf, [2]);
1440 assert_eq!(reader.buffer(), [3]);
1442 let mut buf = [0, 0, 0];
1443 let nread = reader.read(&mut buf);
1444 assert_eq!(nread.unwrap(), 1);
1445 assert_eq!(buf, [3, 0, 0]);
1446 assert_eq!(reader.buffer(), []);
1448 let nread = reader.read(&mut buf);
1449 assert_eq!(nread.unwrap(), 1);
1450 assert_eq!(buf, [4, 0, 0]);
1451 assert_eq!(reader.buffer(), []);
1453 assert_eq!(reader.read(&mut buf).unwrap(), 0);
1457 fn test_buffered_reader_seek() {
1458 let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
1459 let mut reader = BufReader::with_capacity(2, io::Cursor::new(inner));
1461 assert_eq!(reader.seek(SeekFrom::Start(3)).ok(), Some(3));
1462 assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..]));
1463 assert_eq!(reader.seek(SeekFrom::Current(0)).ok(), Some(3));
1464 assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..]));
1465 assert_eq!(reader.seek(SeekFrom::Current(1)).ok(), Some(4));
1466 assert_eq!(reader.fill_buf().ok(), Some(&[1, 2][..]));
1468 assert_eq!(reader.seek(SeekFrom::Current(-2)).ok(), Some(3));
1472 fn test_buffered_reader_seek_relative() {
1473 let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
1474 let mut reader = BufReader::with_capacity(2, io::Cursor::new(inner));
1476 assert!(reader.seek_relative(3).is_ok());
1477 assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..]));
1478 assert!(reader.seek_relative(0).is_ok());
1479 assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..]));
1480 assert!(reader.seek_relative(1).is_ok());
1481 assert_eq!(reader.fill_buf().ok(), Some(&[1][..]));
1482 assert!(reader.seek_relative(-1).is_ok());
1483 assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..]));
1484 assert!(reader.seek_relative(2).is_ok());
1485 assert_eq!(reader.fill_buf().ok(), Some(&[2, 3][..]));
1489 fn test_buffered_reader_invalidated_after_read() {
1490 let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
1491 let mut reader = BufReader::with_capacity(3, io::Cursor::new(inner));
1493 assert_eq!(reader.fill_buf().ok(), Some(&[5, 6, 7][..]));
1496 let mut buffer = [0, 0, 0, 0, 0];
1497 assert_eq!(reader.read(&mut buffer).ok(), Some(5));
1498 assert_eq!(buffer, [0, 1, 2, 3, 4]);
1500 assert!(reader.seek_relative(-2).is_ok());
1501 let mut buffer = [0, 0];
1502 assert_eq!(reader.read(&mut buffer).ok(), Some(2));
1503 assert_eq!(buffer, [3, 4]);
1507 fn test_buffered_reader_invalidated_after_seek() {
1508 let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
1509 let mut reader = BufReader::with_capacity(3, io::Cursor::new(inner));
1511 assert_eq!(reader.fill_buf().ok(), Some(&[5, 6, 7][..]));
1514 assert!(reader.seek(SeekFrom::Current(5)).is_ok());
1516 assert!(reader.seek_relative(-2).is_ok());
1517 let mut buffer = [0, 0];
1518 assert_eq!(reader.read(&mut buffer).ok(), Some(2));
1519 assert_eq!(buffer, [3, 4]);
1523 fn test_buffered_reader_seek_underflow() {
1524 // gimmick reader that yields its position modulo 256 for each byte
1525 struct PositionReader {
1528 impl Read for PositionReader {
1529 fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
1530 let len = buf.len();
1532 *x = self.pos as u8;
1533 self.pos = self.pos.wrapping_add(1);
1538 impl Seek for PositionReader {
1539 fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
1541 SeekFrom::Start(n) => {
1544 SeekFrom::Current(n) => {
1545 self.pos = self.pos.wrapping_add(n as u64);
1547 SeekFrom::End(n) => {
1548 self.pos = u64::MAX.wrapping_add(n as u64);
1555 let mut reader = BufReader::with_capacity(5, PositionReader { pos: 0 });
1556 assert_eq!(reader.fill_buf().ok(), Some(&[0, 1, 2, 3, 4][..]));
1557 assert_eq!(reader.seek(SeekFrom::End(-5)).ok(), Some(u64::MAX - 5));
1558 assert_eq!(reader.fill_buf().ok().map(|s| s.len()), Some(5));
1559 // the following seek will require two underlying seeks
1560 let expected = 9223372036854775802;
1561 assert_eq!(reader.seek(SeekFrom::Current(i64::MIN)).ok(), Some(expected));
1562 assert_eq!(reader.fill_buf().ok().map(|s| s.len()), Some(5));
1563 // seeking to 0 should empty the buffer.
1564 assert_eq!(reader.seek(SeekFrom::Current(0)).ok(), Some(expected));
1565 assert_eq!(reader.get_ref().pos, expected);
1569 fn test_buffered_reader_seek_underflow_discard_buffer_between_seeks() {
1570 // gimmick reader that returns Err after first seek
1571 struct ErrAfterFirstSeekReader {
1574 impl Read for ErrAfterFirstSeekReader {
1575 fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
1576 for x in &mut *buf {
1582 impl Seek for ErrAfterFirstSeekReader {
1583 fn seek(&mut self, _: SeekFrom) -> io::Result<u64> {
1584 if self.first_seek {
1585 self.first_seek = false;
1588 Err(io::Error::new(io::ErrorKind::Other, "oh no!"))
1593 let mut reader = BufReader::with_capacity(5, ErrAfterFirstSeekReader { first_seek: true });
1594 assert_eq!(reader.fill_buf().ok(), Some(&[0, 0, 0, 0, 0][..]));
1596 // The following seek will require two underlying seeks. The first will
1597 // succeed but the second will fail. This should still invalidate the
1599 assert!(reader.seek(SeekFrom::Current(i64::MIN)).is_err());
1600 assert_eq!(reader.buffer().len(), 0);
1604 fn test_buffered_writer() {
1605 let inner = Vec::new();
1606 let mut writer = BufWriter::with_capacity(2, inner);
1608 writer.write(&[0, 1]).unwrap();
1609 assert_eq!(writer.buffer(), []);
1610 assert_eq!(*writer.get_ref(), [0, 1]);
1612 writer.write(&[2]).unwrap();
1613 assert_eq!(writer.buffer(), [2]);
1614 assert_eq!(*writer.get_ref(), [0, 1]);
1616 writer.write(&[3]).unwrap();
1617 assert_eq!(writer.buffer(), [2, 3]);
1618 assert_eq!(*writer.get_ref(), [0, 1]);
1620 writer.flush().unwrap();
1621 assert_eq!(writer.buffer(), []);
1622 assert_eq!(*writer.get_ref(), [0, 1, 2, 3]);
1624 writer.write(&[4]).unwrap();
1625 writer.write(&[5]).unwrap();
1626 assert_eq!(writer.buffer(), [4, 5]);
1627 assert_eq!(*writer.get_ref(), [0, 1, 2, 3]);
1629 writer.write(&[6]).unwrap();
1630 assert_eq!(writer.buffer(), [6]);
1631 assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5]);
1633 writer.write(&[7, 8]).unwrap();
1634 assert_eq!(writer.buffer(), []);
1635 assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8]);
1637 writer.write(&[9, 10, 11]).unwrap();
1638 assert_eq!(writer.buffer(), []);
1639 assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
1641 writer.flush().unwrap();
1642 assert_eq!(writer.buffer(), []);
1643 assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
1647 fn test_buffered_writer_inner_flushes() {
1648 let mut w = BufWriter::with_capacity(3, Vec::new());
1649 w.write(&[0, 1]).unwrap();
1650 assert_eq!(*w.get_ref(), []);
1651 let w = w.into_inner().unwrap();
1652 assert_eq!(w, [0, 1]);
1656 fn test_buffered_writer_seek() {
1657 let mut w = BufWriter::with_capacity(3, io::Cursor::new(Vec::new()));
1658 w.write_all(&[0, 1, 2, 3, 4, 5]).unwrap();
1659 w.write_all(&[6, 7]).unwrap();
1660 assert_eq!(w.seek(SeekFrom::Current(0)).ok(), Some(8));
1661 assert_eq!(&w.get_ref().get_ref()[..], &[0, 1, 2, 3, 4, 5, 6, 7][..]);
1662 assert_eq!(w.seek(SeekFrom::Start(2)).ok(), Some(2));
1663 w.write_all(&[8, 9]).unwrap();
1664 assert_eq!(&w.into_inner().unwrap().into_inner()[..], &[0, 1, 8, 9, 4, 5, 6, 7]);
1668 fn test_read_until() {
1669 let inner: &[u8] = &[0, 1, 2, 1, 0];
1670 let mut reader = BufReader::with_capacity(2, inner);
1671 let mut v = Vec::new();
1672 reader.read_until(0, &mut v).unwrap();
1675 reader.read_until(2, &mut v).unwrap();
1676 assert_eq!(v, [1, 2]);
1678 reader.read_until(1, &mut v).unwrap();
1681 reader.read_until(8, &mut v).unwrap();
1684 reader.read_until(9, &mut v).unwrap();
1689 fn test_line_buffer() {
1690 let mut writer = LineWriter::new(Vec::new());
1691 writer.write(&[0]).unwrap();
1692 assert_eq!(*writer.get_ref(), []);
1693 writer.write(&[1]).unwrap();
1694 assert_eq!(*writer.get_ref(), []);
1695 writer.flush().unwrap();
1696 assert_eq!(*writer.get_ref(), [0, 1]);
1697 writer.write(&[0, b'\n', 1, b'\n', 2]).unwrap();
1698 assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n']);
1699 writer.flush().unwrap();
1700 assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n', 2]);
1701 writer.write(&[3, b'\n']).unwrap();
1702 assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n', 2, 3, b'\n']);
1706 fn test_read_line() {
1707 let in_buf: &[u8] = b"a\nb\nc";
1708 let mut reader = BufReader::with_capacity(2, in_buf);
1709 let mut s = String::new();
1710 reader.read_line(&mut s).unwrap();
1711 assert_eq!(s, "a\n");
1713 reader.read_line(&mut s).unwrap();
1714 assert_eq!(s, "b\n");
1716 reader.read_line(&mut s).unwrap();
1719 reader.read_line(&mut s).unwrap();
1725 let in_buf: &[u8] = b"a\nb\nc";
1726 let reader = BufReader::with_capacity(2, in_buf);
1727 let mut it = reader.lines();
1728 assert_eq!(it.next().unwrap().unwrap(), "a".to_string());
1729 assert_eq!(it.next().unwrap().unwrap(), "b".to_string());
1730 assert_eq!(it.next().unwrap().unwrap(), "c".to_string());
1731 assert!(it.next().is_none());
1735 fn test_short_reads() {
1736 let inner = ShortReader { lengths: vec![0, 1, 2, 0, 1, 0] };
1737 let mut reader = BufReader::new(inner);
1738 let mut buf = [0, 0];
1739 assert_eq!(reader.read(&mut buf).unwrap(), 0);
1740 assert_eq!(reader.read(&mut buf).unwrap(), 1);
1741 assert_eq!(reader.read(&mut buf).unwrap(), 2);
1742 assert_eq!(reader.read(&mut buf).unwrap(), 0);
1743 assert_eq!(reader.read(&mut buf).unwrap(), 1);
1744 assert_eq!(reader.read(&mut buf).unwrap(), 0);
1745 assert_eq!(reader.read(&mut buf).unwrap(), 0);
1750 fn dont_panic_in_drop_on_panicked_flush() {
1751 struct FailFlushWriter;
1753 impl Write for FailFlushWriter {
1754 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
1757 fn flush(&mut self) -> io::Result<()> {
1758 Err(io::Error::last_os_error())
1762 let writer = FailFlushWriter;
1763 let _writer = BufWriter::new(writer);
1765 // If writer panics *again* due to the flush error then the process will
1771 #[cfg_attr(target_os = "emscripten", ignore)]
1772 fn panic_in_write_doesnt_flush_in_drop() {
1773 static WRITES: AtomicUsize = AtomicUsize::new(0);
1777 impl Write for PanicWriter {
1778 fn write(&mut self, _: &[u8]) -> io::Result<usize> {
1779 WRITES.fetch_add(1, Ordering::SeqCst);
1782 fn flush(&mut self) -> io::Result<()> {
1788 let mut writer = BufWriter::new(PanicWriter);
1789 let _ = writer.write(b"hello world");
1790 let _ = writer.flush();
1795 assert_eq!(WRITES.load(Ordering::SeqCst), 1);
1799 fn bench_buffered_reader(b: &mut test::Bencher) {
1800 b.iter(|| BufReader::new(io::empty()));
1804 fn bench_buffered_writer(b: &mut test::Bencher) {
1805 b.iter(|| BufWriter::new(io::sink()));
1808 /// A simple `Write` target, designed to be wrapped by `LineWriter` /
1809 /// `BufWriter` / etc, that can have its `write` & `flush` behavior
1811 #[derive(Default, Clone)]
1812 struct ProgrammableSink {
1813 // Writes append to this slice
1814 pub buffer: Vec<u8>,
1816 // Flush sets this flag
1819 // If true, writes will always be an error
1820 pub always_write_error: bool,
1822 // If true, flushes will always be an error
1823 pub always_flush_error: bool,
1825 // If set, only up to this number of bytes will be written in a single
1827 pub accept_prefix: Option<usize>,
1829 // If set, counts down with each write, and writes return an error
1831 pub max_writes: Option<usize>,
1833 // If set, attempting to write when max_writes == Some(0) will be an
1834 // error; otherwise, it will return Ok(0).
1835 pub error_after_max_writes: bool,
1838 impl Write for ProgrammableSink {
1839 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1840 if self.always_write_error {
1841 return Err(io::Error::new(io::ErrorKind::Other, "test - always_write_error"));
1844 match self.max_writes {
1845 Some(0) if self.error_after_max_writes => {
1846 return Err(io::Error::new(io::ErrorKind::Other, "test - max_writes"));
1848 Some(0) => return Ok(0),
1849 Some(ref mut count) => *count -= 1,
1853 let len = match self.accept_prefix {
1855 Some(prefix) => data.len().min(prefix),
1858 let data = &data[..len];
1859 self.buffer.extend_from_slice(data);
1864 fn flush(&mut self) -> io::Result<()> {
1865 if self.always_flush_error {
1866 Err(io::Error::new(io::ErrorKind::Other, "test - always_flush_error"))
1868 self.flushed = true;
1874 /// Previously the `LineWriter` could successfully write some bytes but
1875 /// then fail to report that it has done so. Additionally, an erroneous
1876 /// flush after a successful write was permanently ignored.
1878 /// Test that a line writer correctly reports the number of written bytes,
1879 /// and that it attempts to flush buffered lines from previous writes
1880 /// before processing new data
1882 /// Regression test for #37807
1884 fn erroneous_flush_retried() {
1885 let writer = ProgrammableSink {
1886 // Only write up to 4 bytes at a time
1887 accept_prefix: Some(4),
1889 // Accept the first two writes, then error the others
1890 max_writes: Some(2),
1891 error_after_max_writes: true,
1893 ..Default::default()
1896 // This should write the first 4 bytes. The rest will be buffered, out
1897 // to the last newline.
1898 let mut writer = LineWriter::new(writer);
1899 assert_eq!(writer.write(b"a\nb\nc\nd\ne").unwrap(), 8);
1901 // This write should attempt to flush "c\nd\n", then buffer "e". No
1902 // errors should happen here because no further writes should be
1903 // attempted against `writer`.
1904 assert_eq!(writer.write(b"e").unwrap(), 1);
1905 assert_eq!(&writer.get_ref().buffer, b"a\nb\nc\nd\n");
1909 fn line_vectored() {
1910 let mut a = LineWriter::new(Vec::new());
1914 IoSlice::new(b"\n"),
1921 assert_eq!(a.get_ref(), b"\n");
1935 assert_eq!(a.get_ref(), b"\n");
1937 assert_eq!(a.get_ref(), b"\nabac");
1938 assert_eq!(a.write_vectored(&[]).unwrap(), 0);
1949 assert_eq!(a.write_vectored(&[IoSlice::new(b"a\nb"),]).unwrap(), 3);
1950 assert_eq!(a.get_ref(), b"\nabaca\nb");
1954 fn line_vectored_partial_and_errors() {
1955 use crate::collections::VecDeque;
1958 Write { inputs: Vec<&'static [u8]>, output: io::Result<usize> },
1959 Flush { output: io::Result<()> },
1964 calls: VecDeque<Call>,
1967 impl Write for Writer {
1968 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
1969 self.write_vectored(&[IoSlice::new(buf)])
1972 fn write_vectored(&mut self, buf: &[IoSlice<'_>]) -> io::Result<usize> {
1973 match self.calls.pop_front().expect("unexpected call to write") {
1974 Call::Write { inputs, output } => {
1975 assert_eq!(inputs, buf.iter().map(|b| &**b).collect::<Vec<_>>());
1978 Call::Flush { .. } => panic!("unexpected call to write; expected a flush"),
1982 fn is_write_vectored(&self) -> bool {
1986 fn flush(&mut self) -> io::Result<()> {
1987 match self.calls.pop_front().expect("Unexpected call to flush") {
1988 Call::Flush { output } => output,
1989 Call::Write { .. } => panic!("unexpected call to flush; expected a write"),
1994 impl Drop for Writer {
1995 fn drop(&mut self) {
1996 if !thread::panicking() {
1997 assert_eq!(self.calls.len(), 0);
2002 // partial writes keep going
2003 let mut a = LineWriter::new(Writer::default());
2004 a.write_vectored(&[IoSlice::new(&[]), IoSlice::new(b"abc")]).unwrap();
2006 a.get_mut().calls.push_back(Call::Write { inputs: vec![b"abc"], output: Ok(1) });
2007 a.get_mut().calls.push_back(Call::Write { inputs: vec![b"bc"], output: Ok(2) });
2008 a.get_mut().calls.push_back(Call::Write { inputs: vec![b"x", b"\n"], output: Ok(2) });
2010 a.write_vectored(&[IoSlice::new(b"x"), IoSlice::new(b"\n")]).unwrap();
2012 a.get_mut().calls.push_back(Call::Flush { output: Ok(()) });
2015 // erroneous writes stop and don't write more
2016 a.get_mut().calls.push_back(Call::Write { inputs: vec![b"x", b"\na"], output: Err(err()) });
2017 a.get_mut().calls.push_back(Call::Flush { output: Ok(()) });
2018 assert!(a.write_vectored(&[IoSlice::new(b"x"), IoSlice::new(b"\na")]).is_err());
2021 fn err() -> io::Error {
2022 io::Error::new(io::ErrorKind::Other, "x")
2026 /// Test that, in cases where vectored writing is not enabled, the
2027 /// LineWriter uses the normal `write` call, which more-correctly handles
2030 fn line_vectored_ignored() {
2031 let writer = ProgrammableSink::default();
2032 let mut writer = LineWriter::new(writer);
2036 IoSlice::new(b"Line 1\nLine"),
2037 IoSlice::new(b" 2\nLine 3\nL"),
2040 IoSlice::new(b"ine 4"),
2041 IoSlice::new(b"\nLine 5\n"),
2044 let count = writer.write_vectored(&content).unwrap();
2045 assert_eq!(count, 11);
2046 assert_eq!(&writer.get_ref().buffer, b"Line 1\n");
2048 let count = writer.write_vectored(&content[2..]).unwrap();
2049 assert_eq!(count, 11);
2050 assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\nLine 3\n");
2052 let count = writer.write_vectored(&content[5..]).unwrap();
2053 assert_eq!(count, 5);
2054 assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\nLine 3\n");
2056 let count = writer.write_vectored(&content[6..]).unwrap();
2057 assert_eq!(count, 8);
2059 writer.get_ref().buffer.as_slice(),
2060 b"Line 1\nLine 2\nLine 3\nLine 4\nLine 5\n".as_ref()
2064 /// Test that, given this input:
2071 /// And given a result that only writes to midway through Line 2
2073 /// That only up to the end of Line 3 is buffered
2075 /// This behavior is desirable because it prevents flushing partial lines
2077 fn partial_write_buffers_line() {
2078 let writer = ProgrammableSink { accept_prefix: Some(13), ..Default::default() };
2079 let mut writer = LineWriter::new(writer);
2081 assert_eq!(writer.write(b"Line 1\nLine 2\nLine 3\nLine4").unwrap(), 21);
2082 assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2");
2084 assert_eq!(writer.write(b"Line 4").unwrap(), 6);
2085 assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\nLine 3\n");
2088 /// Test that, given this input:
2094 /// And given that the full write of lines 1 and 2 was successful
2095 /// That data up to Line 3 is buffered
2097 fn partial_line_buffered_after_line_write() {
2098 let writer = ProgrammableSink::default();
2099 let mut writer = LineWriter::new(writer);
2101 assert_eq!(writer.write(b"Line 1\nLine 2\nLine 3").unwrap(), 20);
2102 assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\n");
2104 assert!(writer.flush().is_ok());
2105 assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\nLine 3");
2108     /// Test that, given a partial line that exceeds the capacity of the
2109     /// `LineWriter`'s internal buffer (that is, without a trailing newline),
2110     /// that line is written through to the inner writer
2112 fn long_line_flushed() {
2113 let writer = ProgrammableSink::default();
2114 let mut writer = LineWriter::with_capacity(5, writer);
2116 assert_eq!(writer.write(b"0123456789").unwrap(), 10);
2117 assert_eq!(&writer.get_ref().buffer, b"0123456789");
2120 /// Test that, given a very long partial line *after* successfully
2121 /// flushing a complete line, that that line is buffered unconditionally,
2122 /// and no additional writes take place. This assures the property that
2123 /// `write` should make at-most-one attempt to write new data.
2125 fn line_long_tail_not_flushed() {
2126 let writer = ProgrammableSink::default();
2127 let mut writer = LineWriter::with_capacity(5, writer);
2129 // Assert that Line 1\n is flushed, and 01234 is buffered
2130 assert_eq!(writer.write(b"Line 1\n0123456789").unwrap(), 12);
2131 assert_eq!(&writer.get_ref().buffer, b"Line 1\n");
2133 // Because the buffer is full, this subsequent write will flush it
2134 assert_eq!(writer.write(b"5").unwrap(), 1);
2135 assert_eq!(&writer.get_ref().buffer, b"Line 1\n01234");
2138 /// Test that, if an attempt to pre-flush buffered data returns Ok(0),
2139 /// this is propagated as an error.
2141 fn line_buffer_write0_error() {
2142 let writer = ProgrammableSink {
2143 // Accept one write, then return Ok(0) on subsequent ones
2144 max_writes: Some(1),
2146 ..Default::default()
2148 let mut writer = LineWriter::new(writer);
2150 // This should write "Line 1\n" and buffer "Partial"
2151 assert_eq!(writer.write(b"Line 1\nPartial").unwrap(), 14);
2152 assert_eq!(&writer.get_ref().buffer, b"Line 1\n");
2154 // This will attempt to flush "partial", which will return Ok(0), which
2155 // needs to be an error, because we've already informed the client
2156 // that we accepted the write.
2157 let err = writer.write(b" Line End\n").unwrap_err();
2158 assert_eq!(err.kind(), ErrorKind::WriteZero);
2159 assert_eq!(&writer.get_ref().buffer, b"Line 1\n");
2162 /// Test that, if a write returns Ok(0) after a successful pre-flush, this
2163 /// is propagated as Ok(0)
2165 fn line_buffer_write0_normal() {
2166 let writer = ProgrammableSink {
2167 // Accept two writes, then return Ok(0) on subsequent ones
2168 max_writes: Some(2),
2170 ..Default::default()
2172 let mut writer = LineWriter::new(writer);
2174 // This should write "Line 1\n" and buffer "Partial"
2175 assert_eq!(writer.write(b"Line 1\nPartial").unwrap(), 14);
2176 assert_eq!(&writer.get_ref().buffer, b"Line 1\n");
2178 // This will flush partial, which will succeed, but then return Ok(0)
2179 // when flushing " Line End\n"
2180 assert_eq!(writer.write(b" Line End\n").unwrap(), 0);
2181 assert_eq!(&writer.get_ref().buffer, b"Line 1\nPartial");
2184 /// LineWriter has a custom `write_all`; make sure it works correctly
2186 fn line_write_all() {
2187 let writer = ProgrammableSink {
2188 // Only write 5 bytes at a time
2189 accept_prefix: Some(5),
2190 ..Default::default()
2192 let mut writer = LineWriter::new(writer);
2194 writer.write_all(b"Line 1\nLine 2\nLine 3\nLine 4\nPartial").unwrap();
2195 assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\nLine 3\nLine 4\n");
2196 writer.write_all(b" Line 5\n").unwrap();
2198 writer.get_ref().buffer.as_slice(),
2199 b"Line 1\nLine 2\nLine 3\nLine 4\nPartial Line 5\n".as_ref(),
/// Test that `write_all` surfaces an error when the underlying sink stops
/// accepting writes partway through.
// NOTE(review): after max_writes is exhausted the sink presumably returns
// Ok(0), which write_all reports as an error (WriteZero, as in the
// line_buffer_write0 tests above) — confirm against ProgrammableSink.
fn line_write_all_error() {
let writer = ProgrammableSink {
// Only accept up to 3 writes of up to 5 bytes each
accept_prefix: Some(5),
max_writes: Some(3),
..Default::default()
let mut writer = LineWriter::new(writer);
let res = writer.write_all(b"Line 1\nLine 2\nLine 3\nLine 4\nPartial");
assert!(res.is_err());
// An error from write_all leaves everything in an indeterminate state,
// so there's nothing else to test here
/// Under certain circumstances, the old implementation of LineWriter
/// would try to buffer "to the last newline" but be forced to buffer
/// less than that, leading to inappropriate partial line writes.
/// Regression test for that issue.
fn partial_multiline_buffering() {
let writer = ProgrammableSink {
// Write only up to 5 bytes at a time
accept_prefix: Some(5),
..Default::default()
// Capacity 10 is deliberately too small to hold blocks B, C and D together.
let mut writer = LineWriter::with_capacity(10, writer);
let content = b"AAAAABBBBB\nCCCCDDDDDD\nEEE";
// When content is written, LineWriter will try to write blocks A, B,
// C, and D. Only block A will succeed. Under the old behavior, LineWriter
// would then try to buffer B, C and D, but because its capacity is 10,
// it will only be able to buffer B and C. We don't want to buffer
// partial lines concurrent with whole lines, so the correct behavior
// is to buffer only block B (out to the newline)
// 11 == b"AAAAABBBBB\n".len(): everything through the first newline is
// reported as written (A flushed, B\n buffered).
assert_eq!(writer.write(content).unwrap(), 11);
assert_eq!(writer.get_ref().buffer, *b"AAAAA");
// Flushing pushes the buffered "BBBBB\n" through to the sink.
writer.flush().unwrap();
assert_eq!(writer.get_ref().buffer, *b"AAAAABBBBB\n");
/// Same as partial_multiline_buffering, but in the event NO full lines
/// fit in the buffer, just buffer as much as possible
fn partial_multiline_buffering_without_full_line() {
let writer = ProgrammableSink {
// Write only up to 5 bytes at a time
accept_prefix: Some(5),
..Default::default()
// Capacity 5 means not even one complete line ("BBBBBBBBBB\n") can fit.
let mut writer = LineWriter::with_capacity(5, writer);
let content = b"AAAAABBBBBBBBBB\nCCCCC\nDDDDD";
// When content is written, LineWriter will try to write blocks A, B,
// and C. Only block A will succeed. Under the old behavior, LineWriter
// would then try to buffer B and C, but because its capacity is 5,
// it will only be able to buffer part of B. Because it's not possible
// for it to buffer any complete lines, it should buffer as much of B as
// 10 == "AAAAA" (flushed) + "BBBBB" (buffered, filling the capacity).
assert_eq!(writer.write(content).unwrap(), 10);
assert_eq!(writer.get_ref().buffer, *b"AAAAA");
// Flushing pushes the buffered "BBBBB" through to the sink.
writer.flush().unwrap();
assert_eq!(writer.get_ref().buffer, *b"AAAAABBBBB");
/// A single observed call on a `WriteRecorder`: a `Write` carrying the
/// UTF-8 payload that was written, or a `Flush`.
#[derive(Debug, Clone, PartialEq, Eq)]
enum RecordedEvent {
/// A `Write` implementation that records every `write`/`flush` call it
/// receives, so tests can assert exactly how many calls a wrapper made.
#[derive(Debug, Clone, Default)]
struct WriteRecorder {
// Calls observed so far, in order.
pub events: Vec<RecordedEvent>,
impl Write for WriteRecorder {
// Record the payload as a string. The unwrap assumes callers only write
// valid UTF-8 (these tests write ASCII only); presumably the full buffer
// is then reported as written — confirm against the hidden return value.
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
use crate::str::from_utf8;
self.events.push(RecordedEvent::Write(from_utf8(buf).unwrap().to_string()));
// Record the flush; no data is held back, so there is nothing to do.
fn flush(&mut self) -> io::Result<()> {
self.events.push(RecordedEvent::Flush);
/// Test that a normal, formatted writeln only results in a single write
/// call to the underlying writer. A naive implementation of
/// LineWriter::write_all results in two writes: one of the buffered data,
/// and another of the final substring in the formatted set
fn single_formatted_write() {
let writer = WriteRecorder::default();
let mut writer = LineWriter::new(writer);
// Under a naive implementation of LineWriter, this will result in two
// writes: "hello, world" and "!\n", because write() has to flush the
// buffer before attempting to write the last "!\n". write_all shouldn't
// have this limitation.
writeln!(&mut writer, "{}, {}!", "hello", "world").unwrap();
// Exactly one Write event (and no Flush) must have reached the recorder.
assert_eq!(writer.get_ref().events, [RecordedEvent::Write("hello, world!\n".to_string())]);