1 use crate::io::prelude::*;
2 use crate::io::{self, BufReader, BufWriter, ErrorKind, IoSlice, LineWriter, ReadBuf, SeekFrom};
3 use crate::mem::MaybeUninit;
5 use crate::sync::atomic::{AtomicUsize, Ordering};
// NOTE(review): this file is a lossy numbered extraction — original line
// numbers are embedded in every line and the numbering is non-contiguous,
// meaning attributes, blank lines, struct fields and closing braces were
// dropped. Comments below are review annotations only; no code tokens changed.
8 /// A dummy reader intended at testing short-reads propagation.
9 pub struct ShortReader {
// NOTE(review): the struct body is missing from this extraction. A `lengths`
// field exists — it is read in `read` below and initialized with
// `lengths: vec![0, 1, 2, 0, 1, 0]` in test_short_reads — presumably
// `Vec<usize>`; confirm against the canonical source.
13 // FIXME: rustfmt and tidy disagree about the correct formatting of this
14 // function. This leads to issues for users with editors configured to
16 impl Read for ShortReader {
17 fn read(&mut self, _: &mut [u8]) -> io::Result<usize> {
// Pop and report the next scripted read length; Ok(0) (EOF) once the script
// is exhausted. Note it never writes into the destination buffer — it only
// *claims* a length, which is exactly what a short-read test needs.
18 if self.lengths.is_empty() { Ok(0) } else { Ok(self.lengths.remove(0)) }
// Basic BufReader behavior with a tiny (2-byte) internal buffer: reads larger
// than the buffer bypass it; smaller reads are served from (and refill) it.
// NOTE(review): `#[test]` attributes and closing braces fall in lines dropped
// by this extraction.
23 fn test_buffered_reader() {
24 let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
25 let mut reader = BufReader::with_capacity(2, inner);
// A 3-byte read exceeds the 2-byte capacity, so it goes straight to the
// underlying reader and the internal buffer stays empty.
27 let mut buf = [0, 0, 0];
28 let nread = reader.read(&mut buf);
29 assert_eq!(nread.unwrap(), 3);
30 assert_eq!(buf, [5, 6, 7]);
31 assert_eq!(reader.buffer(), []);
34 let nread = reader.read(&mut buf);
35 assert_eq!(nread.unwrap(), 2);
36 assert_eq!(buf, [0, 1]);
37 assert_eq!(reader.buffer(), []);
40 let nread = reader.read(&mut buf);
41 assert_eq!(nread.unwrap(), 1);
// One byte of the refilled buffer remains unconsumed.
43 assert_eq!(reader.buffer(), [3]);
45 let mut buf = [0, 0, 0];
46 let nread = reader.read(&mut buf);
47 assert_eq!(nread.unwrap(), 1);
48 assert_eq!(buf, [3, 0, 0]);
49 assert_eq!(reader.buffer(), []);
51 let nread = reader.read(&mut buf);
52 assert_eq!(nread.unwrap(), 1);
53 assert_eq!(buf, [4, 0, 0]);
54 assert_eq!(reader.buffer(), []);
// Source exhausted: EOF reads return 0.
56 assert_eq!(reader.read(&mut buf).unwrap(), 0);
// Same scenario as above, exercised through the uninitialized-buffer
// `read_buf` / `ReadBuf` API instead of `read`.
60 fn test_buffered_reader_read_buf() {
61 let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
62 let mut reader = BufReader::with_capacity(2, inner);
64 let mut buf = [MaybeUninit::uninit(); 3];
65 let mut buf = ReadBuf::uninit(&mut buf);
67 reader.read_buf(&mut buf).unwrap();
69 assert_eq!(buf.filled(), [5, 6, 7]);
70 assert_eq!(reader.buffer(), []);
72 let mut buf = [MaybeUninit::uninit(); 2];
73 let mut buf = ReadBuf::uninit(&mut buf);
75 reader.read_buf(&mut buf).unwrap();
77 assert_eq!(buf.filled(), [0, 1]);
78 assert_eq!(reader.buffer(), []);
80 let mut buf = [MaybeUninit::uninit(); 1];
81 let mut buf = ReadBuf::uninit(&mut buf);
83 reader.read_buf(&mut buf).unwrap();
85 assert_eq!(buf.filled(), [2]);
86 assert_eq!(reader.buffer(), [3]);
88 let mut buf = [MaybeUninit::uninit(); 3];
89 let mut buf = ReadBuf::uninit(&mut buf);
91 reader.read_buf(&mut buf).unwrap();
93 assert_eq!(buf.filled(), [3]);
94 assert_eq!(reader.buffer(), []);
// Second read_buf into the same ReadBuf appends after the already-filled
// portion, hence [3, 4].
96 reader.read_buf(&mut buf).unwrap();
98 assert_eq!(buf.filled(), [3, 4]);
99 assert_eq!(reader.buffer(), []);
// NOTE(review): lines 100-102 are missing here; presumably the ReadBuf is
// cleared before this EOF check — confirm against the source.
103 reader.read_buf(&mut buf).unwrap();
105 assert_eq!(buf.filled_len(), 0);
// Seeking on a BufReader: absolute and relative seeks, and their interaction
// with the internal buffer (seek_relative within the buffered range must not
// discard the buffer; seeking outside it does).
109 fn test_buffered_reader_seek() {
110 let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
111 let mut reader = BufReader::with_capacity(2, io::Cursor::new(inner));
113 assert_eq!(reader.seek(SeekFrom::Start(3)).ok(), Some(3));
114 assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..]));
115 assert_eq!(reader.seek(SeekFrom::Current(0)).ok(), Some(3));
116 assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..]));
117 assert_eq!(reader.seek(SeekFrom::Current(1)).ok(), Some(4));
118 assert_eq!(reader.fill_buf().ok(), Some(&[1, 2][..]));
120 assert_eq!(reader.seek(SeekFrom::Current(-2)).ok(), Some(3));
// seek_relative: in-buffer seeks keep the buffer; note after seek_relative(1)
// the buffer is the remaining [1], not a refill.
124 fn test_buffered_reader_seek_relative() {
125 let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
126 let mut reader = BufReader::with_capacity(2, io::Cursor::new(inner));
128 assert!(reader.seek_relative(3).is_ok());
129 assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..]));
130 assert!(reader.seek_relative(0).is_ok());
131 assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..]));
132 assert!(reader.seek_relative(1).is_ok());
133 assert_eq!(reader.fill_buf().ok(), Some(&[1][..]));
134 assert!(reader.seek_relative(-1).is_ok());
135 assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..]));
136 assert!(reader.seek_relative(2).is_ok());
137 assert_eq!(reader.fill_buf().ok(), Some(&[2, 3][..]));
// stream_position must account for unconsumed buffered bytes.
141 fn test_buffered_reader_stream_position() {
142 let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
143 let mut reader = BufReader::with_capacity(2, io::Cursor::new(inner));
145 assert_eq!(reader.stream_position().ok(), Some(0));
146 assert_eq!(reader.seek(SeekFrom::Start(3)).ok(), Some(3));
147 assert_eq!(reader.stream_position().ok(), Some(3));
148 // relative seeking within the buffer and reading position should keep the buffer
149 assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..]));
150 assert!(reader.seek_relative(0).is_ok());
151 assert_eq!(reader.stream_position().ok(), Some(3));
152 assert_eq!(reader.buffer(), &[0, 1][..]);
153 assert!(reader.seek_relative(1).is_ok());
154 assert_eq!(reader.stream_position().ok(), Some(4));
155 assert_eq!(reader.buffer(), &[1][..]);
156 assert!(reader.seek_relative(-1).is_ok());
157 assert_eq!(reader.stream_position().ok(), Some(3));
158 assert_eq!(reader.buffer(), &[0, 1][..]);
159 // relative seeking outside the buffer will discard it
160 assert!(reader.seek_relative(2).is_ok());
161 assert_eq!(reader.stream_position().ok(), Some(5));
162 assert_eq!(reader.buffer(), &[][..]);
// stream_position panics (debug-overflow) when the inner reader is rewound
// behind the BufReader's back, desyncing position vs. remaining buffer.
// NOTE(review): `panic::catch_unwind` is used below but no `use crate::panic;`
// appears in the visible imports — the extraction presumably dropped it.
166 fn test_buffered_reader_stream_position_panic() {
167 let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
168 let mut reader = BufReader::with_capacity(4, io::Cursor::new(inner));
170 // cause internal buffer to be filled but read only partially
171 let mut buffer = [0, 0];
172 assert!(reader.read_exact(&mut buffer).is_ok());
173 // rewinding the internal reader will cause buffer to loose sync
174 let inner = reader.get_mut();
175 assert!(inner.seek(SeekFrom::Start(0)).is_ok());
176 // overflow when subtracting the remaining buffer size from current position
177 let result = panic::catch_unwind(panic::AssertUnwindSafe(|| reader.stream_position().ok()));
178 assert!(result.is_err());
// A large read() that bypasses the buffer must still invalidate the stale
// buffered bytes, so a following seek_relative works against real position.
182 fn test_buffered_reader_invalidated_after_read() {
183 let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
184 let mut reader = BufReader::with_capacity(3, io::Cursor::new(inner));
186 assert_eq!(reader.fill_buf().ok(), Some(&[5, 6, 7][..]));
189 let mut buffer = [0, 0, 0, 0, 0];
190 assert_eq!(reader.read(&mut buffer).ok(), Some(5));
191 assert_eq!(buffer, [0, 1, 2, 3, 4]);
193 assert!(reader.seek_relative(-2).is_ok());
194 let mut buffer = [0, 0];
195 assert_eq!(reader.read(&mut buffer).ok(), Some(2));
196 assert_eq!(buffer, [3, 4]);
// Same invalidation property, but triggered by an absolute seek instead of a
// buffer-bypassing read.
200 fn test_buffered_reader_invalidated_after_seek() {
201 let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
202 let mut reader = BufReader::with_capacity(3, io::Cursor::new(inner));
204 assert_eq!(reader.fill_buf().ok(), Some(&[5, 6, 7][..]));
207 assert!(reader.seek(SeekFrom::Current(5)).is_ok());
209 assert!(reader.seek_relative(-2).is_ok());
210 let mut buffer = [0, 0];
211 assert_eq!(reader.read(&mut buffer).ok(), Some(2));
212 assert_eq!(buffer, [3, 4]);
// SeekFrom::Current(i64::MIN) can't be expressed as one inner seek (the
// buffered-bytes correction would underflow), so BufReader must split it into
// two underlying seeks. These helpers script the inner reader's behavior.
// NOTE(review): this region is heavily gapped — the PositionReader read-loop
// body (orig. lines 223-225), parts of the seek match arms, and both structs'
// field lists are missing from the extraction.
216 fn test_buffered_reader_seek_underflow() {
217 // gimmick reader that yields its position modulo 256 for each byte
218 struct PositionReader {
221 impl Read for PositionReader {
222 fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
226 self.pos = self.pos.wrapping_add(1);
231 impl Seek for PositionReader {
232 fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
234 SeekFrom::Start(n) => {
237 SeekFrom::Current(n) => {
// Wrapping arithmetic throughout: positions near u64::MAX are the point.
238 self.pos = self.pos.wrapping_add(n as u64);
240 SeekFrom::End(n) => {
241 self.pos = u64::MAX.wrapping_add(n as u64);
248 let mut reader = BufReader::with_capacity(5, PositionReader { pos: 0 });
249 assert_eq!(reader.fill_buf().ok(), Some(&[0, 1, 2, 3, 4][..]));
250 assert_eq!(reader.seek(SeekFrom::End(-5)).ok(), Some(u64::MAX - 5));
251 assert_eq!(reader.fill_buf().ok().map(|s| s.len()), Some(5));
252 // the following seek will require two underlying seeks
253 let expected = 9223372036854775802;
254 assert_eq!(reader.seek(SeekFrom::Current(i64::MIN)).ok(), Some(expected));
255 assert_eq!(reader.fill_buf().ok().map(|s| s.len()), Some(5));
256 // seeking to 0 should empty the buffer.
257 assert_eq!(reader.seek(SeekFrom::Current(0)).ok(), Some(expected));
258 assert_eq!(reader.get_ref().pos, expected);
// If the second of the two underlying seeks fails, the buffer must still be
// discarded so the reader isn't left desynced.
262 fn test_buffered_reader_seek_underflow_discard_buffer_between_seeks() {
263 // gimmick reader that returns Err after first seek
264 struct ErrAfterFirstSeekReader {
267 impl Read for ErrAfterFirstSeekReader {
268 fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
275 impl Seek for ErrAfterFirstSeekReader {
276 fn seek(&mut self, _: SeekFrom) -> io::Result<u64> {
278 self.first_seek = false;
281 Err(io::Error::new(io::ErrorKind::Other, "oh no!"))
286 let mut reader = BufReader::with_capacity(5, ErrAfterFirstSeekReader { first_seek: true });
287 assert_eq!(reader.fill_buf().ok(), Some(&[0, 0, 0, 0, 0][..]));
289 // The following seek will require two underlying seeks. The first will
290 // succeed but the second will fail. This should still invalidate the
292 assert!(reader.seek(SeekFrom::Current(i64::MIN)).is_err());
293 assert_eq!(reader.buffer().len(), 0);
// read_to_end must drain the already-buffered bytes first (no data lost, no
// duplicates) and leave the internal buffer empty afterwards.
297 fn test_buffered_reader_read_to_end_consumes_buffer() {
298 let data: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7];
299 let mut reader = BufReader::with_capacity(3, data);
300 let mut buf = Vec::new();
301 assert_eq!(reader.fill_buf().ok(), Some(&[0, 1, 2][..]));
302 assert_eq!(reader.read_to_end(&mut buf).ok(), Some(8));
303 assert_eq!(&buf, &[0, 1, 2, 3, 4, 5, 6, 7]);
304 assert!(reader.buffer().is_empty());
// Same contract for read_to_string on UTF-8 input.
308 fn test_buffered_reader_read_to_string_consumes_buffer() {
309 let data: &[u8] = "deadbeef".as_bytes();
310 let mut reader = BufReader::with_capacity(3, data);
311 let mut buf = String::new();
312 assert_eq!(reader.fill_buf().ok(), Some("dea".as_bytes()));
313 assert_eq!(reader.read_to_string(&mut buf).ok(), Some(8));
314 assert_eq!(&buf, "deadbeef");
315 assert!(reader.buffer().is_empty());
// BufWriter with a 2-byte buffer: writes >= capacity go straight through;
// smaller writes accumulate until the buffer fills (or flush is called).
319 fn test_buffered_writer() {
320 let inner = Vec::new();
321 let mut writer = BufWriter::with_capacity(2, inner);
// 2 bytes == capacity: bypasses the buffer entirely.
323 writer.write(&[0, 1]).unwrap();
324 assert_eq!(writer.buffer(), []);
325 assert_eq!(*writer.get_ref(), [0, 1]);
327 writer.write(&[2]).unwrap();
328 assert_eq!(writer.buffer(), [2]);
329 assert_eq!(*writer.get_ref(), [0, 1]);
331 writer.write(&[3]).unwrap();
332 assert_eq!(writer.buffer(), [2, 3]);
333 assert_eq!(*writer.get_ref(), [0, 1]);
335 writer.flush().unwrap();
336 assert_eq!(writer.buffer(), []);
337 assert_eq!(*writer.get_ref(), [0, 1, 2, 3]);
339 writer.write(&[4]).unwrap();
340 writer.write(&[5]).unwrap();
341 assert_eq!(writer.buffer(), [4, 5]);
342 assert_eq!(*writer.get_ref(), [0, 1, 2, 3]);
// Buffer is full, so this write forces a flush of [4, 5] and buffers [6].
344 writer.write(&[6]).unwrap();
345 assert_eq!(writer.buffer(), [6]);
346 assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5]);
348 writer.write(&[7, 8]).unwrap();
349 assert_eq!(writer.buffer(), []);
350 assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8]);
// Over-capacity write: flush pending data, then write through directly.
352 writer.write(&[9, 10, 11]).unwrap();
353 assert_eq!(writer.buffer(), []);
354 assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
356 writer.flush().unwrap();
357 assert_eq!(writer.buffer(), []);
358 assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
// into_inner must flush any buffered data before yielding the inner writer.
362 fn test_buffered_writer_inner_flushes() {
363 let mut w = BufWriter::with_capacity(3, Vec::new());
364 w.write(&[0, 1]).unwrap();
365 assert_eq!(*w.get_ref(), []);
366 let w = w.into_inner().unwrap();
367 assert_eq!(w, [0, 1]);
// Seeking a BufWriter flushes first so position accounting is correct.
371 fn test_buffered_writer_seek() {
372 let mut w = BufWriter::with_capacity(3, io::Cursor::new(Vec::new()));
373 w.write_all(&[0, 1, 2, 3, 4, 5]).unwrap();
374 w.write_all(&[6, 7]).unwrap();
375 assert_eq!(w.seek(SeekFrom::Current(0)).ok(), Some(8));
376 assert_eq!(&w.get_ref().get_ref()[..], &[0, 1, 2, 3, 4, 5, 6, 7][..]);
377 assert_eq!(w.seek(SeekFrom::Start(2)).ok(), Some(2));
378 w.write_all(&[8, 9]).unwrap();
379 assert_eq!(&w.into_inner().unwrap().into_inner()[..], &[0, 1, 8, 9, 4, 5, 6, 7]);
// BufRead::read_until across buffer refills (capacity 2 < line lengths).
// NOTE(review): several assertions/`v.clear()` lines between the visible
// statements (orig. lines 388-401) are missing from this extraction.
383 fn test_read_until() {
384 let inner: &[u8] = &[0, 1, 2, 1, 0];
385 let mut reader = BufReader::with_capacity(2, inner);
386 let mut v = Vec::new();
387 reader.read_until(0, &mut v).unwrap();
390 reader.read_until(2, &mut v).unwrap();
391 assert_eq!(v, [1, 2]);
393 reader.read_until(1, &mut v).unwrap();
396 reader.read_until(8, &mut v).unwrap();
399 reader.read_until(9, &mut v).unwrap();
// LineWriter only forwards data up to (and including) the last newline;
// the partial tail stays buffered until flushed or completed.
404 fn test_line_buffer() {
405 let mut writer = LineWriter::new(Vec::new());
406 writer.write(&[0]).unwrap();
407 assert_eq!(*writer.get_ref(), []);
408 writer.write(&[1]).unwrap();
409 assert_eq!(*writer.get_ref(), []);
410 writer.flush().unwrap();
411 assert_eq!(*writer.get_ref(), [0, 1]);
412 writer.write(&[0, b'\n', 1, b'\n', 2]).unwrap();
413 assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n']);
414 writer.flush().unwrap();
415 assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n', 2]);
416 writer.write(&[3, b'\n']).unwrap();
417 assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n', 2, 3, b'\n']);
// read_line with a buffer smaller than each line; the newline is retained.
421 fn test_read_line() {
422 let in_buf: &[u8] = b"a\nb\nc";
423 let mut reader = BufReader::with_capacity(2, in_buf);
424 let mut s = String::new();
425 reader.read_line(&mut s).unwrap();
426 assert_eq!(s, "a\n");
428 reader.read_line(&mut s).unwrap();
429 assert_eq!(s, "b\n");
431 reader.read_line(&mut s).unwrap();
434 reader.read_line(&mut s).unwrap();
// NOTE(review): the `fn test_lines()` header (orig. ~438-439) is missing;
// the following statements are that test's body. `lines()` strips newlines
// and the final unterminated "c" still yields an item.
440 let in_buf: &[u8] = b"a\nb\nc";
441 let reader = BufReader::with_capacity(2, in_buf);
442 let mut it = reader.lines();
443 assert_eq!(it.next().unwrap().unwrap(), "a".to_string());
444 assert_eq!(it.next().unwrap().unwrap(), "b".to_string());
445 assert_eq!(it.next().unwrap().unwrap(), "c".to_string());
446 assert!(it.next().is_none());
// BufReader must faithfully propagate the inner reader's short reads,
// including interior Ok(0)s, via the ShortReader defined at the top of file.
450 fn test_short_reads() {
451 let inner = ShortReader { lengths: vec![0, 1, 2, 0, 1, 0] };
452 let mut reader = BufReader::new(inner);
453 let mut buf = [0, 0];
454 assert_eq!(reader.read(&mut buf).unwrap(), 0);
455 assert_eq!(reader.read(&mut buf).unwrap(), 1);
456 assert_eq!(reader.read(&mut buf).unwrap(), 2);
457 assert_eq!(reader.read(&mut buf).unwrap(), 0);
458 assert_eq!(reader.read(&mut buf).unwrap(), 1);
459 assert_eq!(reader.read(&mut buf).unwrap(), 0);
460 assert_eq!(reader.read(&mut buf).unwrap(), 0);
// BufWriter's Drop flushes; if that flush errors while already unwinding,
// it must not panic again (double panic == abort).
// NOTE(review): this region is gapped — FailFlushWriter's write body
// (orig. 470-471), the panic! that starts unwinding, and PanicWriter's
// declaration/spawn scaffolding are missing from the extraction.
465 fn dont_panic_in_drop_on_panicked_flush() {
466 struct FailFlushWriter;
468 impl Write for FailFlushWriter {
469 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
472 fn flush(&mut self) -> io::Result<()> {
473 Err(io::Error::last_os_error())
477 let writer = FailFlushWriter;
478 let _writer = BufWriter::new(writer);
480 // If writer panics *again* due to the flush error then the process will
// If write() itself panics, Drop must NOT try to flush the buffered data
// again — WRITES counts underlying write calls to prove exactly one occurred.
486 #[cfg_attr(target_os = "emscripten", ignore)]
487 fn panic_in_write_doesnt_flush_in_drop() {
488 static WRITES: AtomicUsize = AtomicUsize::new(0);
492 impl Write for PanicWriter {
493 fn write(&mut self, _: &[u8]) -> io::Result<usize> {
494 WRITES.fetch_add(1, Ordering::SeqCst);
497 fn flush(&mut self) -> io::Result<()> {
503 let mut writer = BufWriter::new(PanicWriter);
504 let _ = writer.write(b"hello world");
505 let _ = writer.flush();
510 assert_eq!(WRITES.load(Ordering::SeqCst), 1);
// Benchmarks: constructor cost only for the first/last; the middle one
// measures small buffered reads against in-memory data.
514 fn bench_buffered_reader(b: &mut test::Bencher) {
515 b.iter(|| BufReader::new(io::empty()));
519 fn bench_buffered_reader_small_reads(b: &mut test::Bencher) {
520 let data = (0..u8::MAX).cycle().take(1024 * 4).collect::<Vec<_>>();
522 let mut reader = BufReader::new(&data[..]);
523 let mut buf = [0u8; 4];
525 reader.read_exact(&mut buf).unwrap();
526 core::hint::black_box(&buf);
532 fn bench_buffered_writer(b: &mut test::Bencher) {
533 b.iter(|| BufWriter::new(io::sink()));
536 /// A simple `Write` target, designed to be wrapped by `LineWriter` /
537 /// `BufWriter` / etc, that can have its `write` & `flush` behavior
539 #[derive(Default, Clone)]
540 struct ProgrammableSink {
541 // Writes append to this slice
// NOTE(review): the `pub buffer: Vec<u8>` field declaration itself
// (orig. ~542-543) is missing from this extraction; `buffer` is used by
// `extend_from_slice` below and read throughout the LineWriter tests.
544 // If true, writes will always be an error
545 pub always_write_error: bool,
547 // If true, flushes will always be an error
548 pub always_flush_error: bool,
550 // If set, only up to this number of bytes will be written in a single
552 pub accept_prefix: Option<usize>,
554 // If set, counts down with each write, and writes return an error
556 pub max_writes: Option<usize>,
558 // If set, attempting to write when max_writes == Some(0) will be an
559 // error; otherwise, it will return Ok(0).
560 pub error_after_max_writes: bool,
563 impl Write for ProgrammableSink {
564 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
// Behavior order: unconditional error, then write-count exhaustion, then
// the accept_prefix truncation, then the actual append.
565 if self.always_write_error {
566 return Err(io::Error::new(io::ErrorKind::Other, "test - always_write_error"));
569 match self.max_writes {
570 Some(0) if self.error_after_max_writes => {
571 return Err(io::Error::new(io::ErrorKind::Other, "test - max_writes"));
573 Some(0) => return Ok(0),
574 Some(ref mut count) => *count -= 1,
578 let len = match self.accept_prefix {
580 Some(prefix) => data.len().min(prefix),
583 let data = &data[..len];
584 self.buffer.extend_from_slice(data);
589 fn flush(&mut self) -> io::Result<()> {
590 if self.always_flush_error {
591 Err(io::Error::new(io::ErrorKind::Other, "test - always_flush_error"))
598 /// Previously the `LineWriter` could successfully write some bytes but
599 /// then fail to report that it has done so. Additionally, an erroneous
600 /// flush after a successful write was permanently ignored.
602 /// Test that a line writer correctly reports the number of written bytes,
603 /// and that it attempts to flush buffered lines from previous writes
604 /// before processing new data
606 /// Regression test for #37807
608 fn erroneous_flush_retried() {
609 let writer = ProgrammableSink {
610 // Only write up to 4 bytes at a time
611 accept_prefix: Some(4),
613 // Accept the first two writes, then error the others
// NOTE(review): the `max_writes: Some(2)` field line (orig. ~614) and the
// `..Default::default()` tail are missing from this extraction.
615 error_after_max_writes: true,
620 // This should write the first 4 bytes. The rest will be buffered, out
621 // to the last newline.
622 let mut writer = LineWriter::new(writer);
623 assert_eq!(writer.write(b"a\nb\nc\nd\ne").unwrap(), 8);
625 // This write should attempt to flush "c\nd\n", then buffer "e". No
626 // errors should happen here because no further writes should be
627 // attempted against `writer`.
628 assert_eq!(writer.write(b"e").unwrap(), 1);
629 assert_eq!(&writer.get_ref().buffer, b"a\nb\nc\nd\n");
// NOTE(review): the statements below belong to a vectored-write test whose
// `fn line_vectored()` header (orig. ~631-633) and several intermediate
// write_vectored calls are missing from the extraction.
634 let mut a = LineWriter::new(Vec::new());
645 assert_eq!(a.get_ref(), b"\n");
659 assert_eq!(a.get_ref(), b"\n");
661 assert_eq!(a.get_ref(), b"\nabac");
662 assert_eq!(a.write_vectored(&[]).unwrap(), 0);
673 assert_eq!(a.write_vectored(&[IoSlice::new(b"a\nb"),]).unwrap(), 3);
674 assert_eq!(a.get_ref(), b"\nabaca\nb");
// Vectored writes through LineWriter when the inner writer accepts partial
// amounts or errors. `Writer` replays a scripted queue of expected calls and
// its Drop asserts the whole script was consumed.
678 fn line_vectored_partial_and_errors() {
679 use crate::collections::VecDeque;
682 Write { inputs: Vec<&'static [u8]>, output: io::Result<usize> },
683 Flush { output: io::Result<()> },
688 calls: VecDeque<Call>,
691 impl Write for Writer {
692 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
// Funnel plain writes through write_vectored so one script covers both.
693 self.write_vectored(&[IoSlice::new(buf)])
696 fn write_vectored(&mut self, buf: &[IoSlice<'_>]) -> io::Result<usize> {
697 match self.calls.pop_front().expect("unexpected call to write") {
698 Call::Write { inputs, output } => {
// Verify the exact slices the LineWriter chose to forward.
699 assert_eq!(inputs, buf.iter().map(|b| &**b).collect::<Vec<_>>());
702 Call::Flush { .. } => panic!("unexpected call to write; expected a flush"),
706 fn is_write_vectored(&self) -> bool {
710 fn flush(&mut self) -> io::Result<()> {
711 match self.calls.pop_front().expect("Unexpected call to flush") {
712 Call::Flush { output } => output,
713 Call::Write { .. } => panic!("unexpected call to flush; expected a write"),
718 impl Drop for Writer {
// Don't double-panic if the test already failed.
720 if !thread::panicking() {
721 assert_eq!(self.calls.len(), 0);
726 // partial writes keep going
727 let mut a = LineWriter::new(Writer::default());
728 a.write_vectored(&[IoSlice::new(&[]), IoSlice::new(b"abc")]).unwrap();
730 a.get_mut().calls.push_back(Call::Write { inputs: vec![b"abc"], output: Ok(1) });
731 a.get_mut().calls.push_back(Call::Write { inputs: vec![b"bc"], output: Ok(2) });
732 a.get_mut().calls.push_back(Call::Write { inputs: vec![b"x", b"\n"], output: Ok(2) });
734 a.write_vectored(&[IoSlice::new(b"x"), IoSlice::new(b"\n")]).unwrap();
736 a.get_mut().calls.push_back(Call::Flush { output: Ok(()) });
739 // erroneous writes stop and don't write more
740 a.get_mut().calls.push_back(Call::Write { inputs: vec![b"x", b"\na"], output: Err(err()) });
741 a.get_mut().calls.push_back(Call::Flush { output: Ok(()) });
742 assert!(a.write_vectored(&[IoSlice::new(b"x"), IoSlice::new(b"\na")]).is_err());
// Helper: a throwaway error value for the scripted failures above.
745 fn err() -> io::Error {
746 io::Error::new(io::ErrorKind::Other, "x")
750 /// Test that, in cases where vectored writing is not enabled, the
751 /// LineWriter uses the normal `write` call, which more-correctly handles
754 fn line_vectored_ignored() {
755 let writer = ProgrammableSink::default();
756 let mut writer = LineWriter::new(writer);
760 IoSlice::new(b"Line 1\nLine"),
761 IoSlice::new(b" 2\nLine 3\nL"),
764 IoSlice::new(b"ine 4"),
765 IoSlice::new(b"\nLine 5\n"),
// Each write_vectored only consumes up to the buffer boundary; the counts
// below track how many bytes of the remaining slices were accepted.
768 let count = writer.write_vectored(&content).unwrap();
769 assert_eq!(count, 11);
770 assert_eq!(&writer.get_ref().buffer, b"Line 1\n");
772 let count = writer.write_vectored(&content[2..]).unwrap();
773 assert_eq!(count, 11);
774 assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\nLine 3\n");
776 let count = writer.write_vectored(&content[5..]).unwrap();
777 assert_eq!(count, 5);
778 assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\nLine 3\n");
780 let count = writer.write_vectored(&content[6..]).unwrap();
781 assert_eq!(count, 8);
783 writer.get_ref().buffer.as_slice(),
784 b"Line 1\nLine 2\nLine 3\nLine 4\nLine 5\n".as_ref()
788 /// Test that, given this input:
795 /// And given a result that only writes to midway through Line 2
797 /// That only up to the end of Line 3 is buffered
799 /// This behavior is desirable because it prevents flushing partial lines
801 fn partial_write_buffers_line() {
802 let writer = ProgrammableSink { accept_prefix: Some(13), ..Default::default() };
803 let mut writer = LineWriter::new(writer);
// 13 bytes reach the sink ("Line 1\nLine 2"); the rest is buffered only out
// to the last newline, so 21 bytes total are reported as written.
805 assert_eq!(writer.write(b"Line 1\nLine 2\nLine 3\nLine4").unwrap(), 21);
806 assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2");
808 assert_eq!(writer.write(b"Line 4").unwrap(), 6);
809 assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\nLine 3\n");
812 /// Test that, given this input:
818 /// And given that the full write of lines 1 and 2 was successful
819 /// That data up to Line 3 is buffered
821 fn partial_line_buffered_after_line_write() {
822 let writer = ProgrammableSink::default();
823 let mut writer = LineWriter::new(writer);
825 assert_eq!(writer.write(b"Line 1\nLine 2\nLine 3").unwrap(), 20);
826 assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\n");
// An explicit flush pushes the partial "Line 3" through as well.
828 assert!(writer.flush().is_ok());
829 assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\nLine 3");
832 /// Test that, given a partial line that exceeds the length of
833 /// LineBuffer's buffer (that is, without a trailing newline), that that
834 /// line is written to the inner writer
836 fn long_line_flushed() {
837 let writer = ProgrammableSink::default();
838 let mut writer = LineWriter::with_capacity(5, writer);
840 assert_eq!(writer.write(b"0123456789").unwrap(), 10);
841 assert_eq!(&writer.get_ref().buffer, b"0123456789");
844 /// Test that, given a very long partial line *after* successfully
845 /// flushing a complete line, that that line is buffered unconditionally,
846 /// and no additional writes take place. This assures the property that
847 /// `write` should make at-most-one attempt to write new data.
849 fn line_long_tail_not_flushed() {
850 let writer = ProgrammableSink::default();
851 let mut writer = LineWriter::with_capacity(5, writer);
853 // Assert that Line 1\n is flushed, and 01234 is buffered
854 assert_eq!(writer.write(b"Line 1\n0123456789").unwrap(), 12);
855 assert_eq!(&writer.get_ref().buffer, b"Line 1\n");
857 // Because the buffer is full, this subsequent write will flush it
858 assert_eq!(writer.write(b"5").unwrap(), 1);
859 assert_eq!(&writer.get_ref().buffer, b"Line 1\n01234");
862 /// Test that, if an attempt to pre-flush buffered data returns Ok(0),
863 /// this is propagated as an error.
865 fn line_buffer_write0_error() {
866 let writer = ProgrammableSink {
867 // Accept one write, then return Ok(0) on subsequent ones
// NOTE(review): the `max_writes: Some(1)` field and `..Default::default()`
// lines (orig. ~868-870) are missing from this extraction.
872 let mut writer = LineWriter::new(writer);
874 // This should write "Line 1\n" and buffer "Partial"
875 assert_eq!(writer.write(b"Line 1\nPartial").unwrap(), 14);
876 assert_eq!(&writer.get_ref().buffer, b"Line 1\n");
878 // This will attempt to flush "partial", which will return Ok(0), which
879 // needs to be an error, because we've already informed the client
880 // that we accepted the write.
881 let err = writer.write(b" Line End\n").unwrap_err();
882 assert_eq!(err.kind(), ErrorKind::WriteZero);
883 assert_eq!(&writer.get_ref().buffer, b"Line 1\n");
886 /// Test that, if a write returns Ok(0) after a successful pre-flush, this
887 /// is propagated as Ok(0)
889 fn line_buffer_write0_normal() {
890 let writer = ProgrammableSink {
891 // Accept two writes, then return Ok(0) on subsequent ones
// NOTE(review): the `max_writes: Some(2)` field line (orig. ~892) is
// missing from this extraction.
896 let mut writer = LineWriter::new(writer);
898 // This should write "Line 1\n" and buffer "Partial"
899 assert_eq!(writer.write(b"Line 1\nPartial").unwrap(), 14);
900 assert_eq!(&writer.get_ref().buffer, b"Line 1\n");
902 // This will flush partial, which will succeed, but then return Ok(0)
903 // when flushing " Line End\n"
904 assert_eq!(writer.write(b" Line End\n").unwrap(), 0);
905 assert_eq!(&writer.get_ref().buffer, b"Line 1\nPartial");
908 /// LineWriter has a custom `write_all`; make sure it works correctly
910 fn line_write_all() {
911 let writer = ProgrammableSink {
912 // Only write 5 bytes at a time
913 accept_prefix: Some(5),
916 let mut writer = LineWriter::new(writer);
// write_all loops until everything through the last newline hits the sink,
// leaving only the "Partial" tail buffered.
918 writer.write_all(b"Line 1\nLine 2\nLine 3\nLine 4\nPartial").unwrap();
919 assert_eq!(&writer.get_ref().buffer, b"Line 1\nLine 2\nLine 3\nLine 4\n");
920 writer.write_all(b" Line 5\n").unwrap();
922 writer.get_ref().buffer.as_slice(),
923 b"Line 1\nLine 2\nLine 3\nLine 4\nPartial Line 5\n".as_ref(),
928 fn line_write_all_error() {
929 let writer = ProgrammableSink {
930 // Only accept up to 3 writes of up to 5 bytes each
931 accept_prefix: Some(5),
// NOTE(review): the `max_writes: Some(3)` / `error_after_max_writes` field
// lines (orig. ~932-935) are missing from this extraction.
936 let mut writer = LineWriter::new(writer);
937 let res = writer.write_all(b"Line 1\nLine 2\nLine 3\nLine 4\nPartial");
938 assert!(res.is_err());
939 // An error from write_all leaves everything in an indeterminate state,
940 // so there's nothing else to test here
943 /// Under certain circumstances, the old implementation of LineWriter
944 /// would try to buffer "to the last newline" but be forced to buffer
945 /// less than that, leading to inappropriate partial line writes.
946 /// Regression test for that issue.
948 fn partial_multiline_buffering() {
949 let writer = ProgrammableSink {
950 // Write only up to 5 bytes at a time
951 accept_prefix: Some(5),
// NOTE(review): the `max_writes: Some(1)` / `..Default::default()` lines
// (orig. ~952-954) are missing from this extraction.
955 let mut writer = LineWriter::with_capacity(10, writer);
957 let content = b"AAAAABBBBB\nCCCCDDDDDD\nEEE";
959 // When content is written, LineWriter will try to write blocks A, B,
960 // C, and D. Only block A will succeed. Under the old behavior, LineWriter
961 // would then try to buffer B, C and D, but because its capacity is 10,
962 // it will only be able to buffer B and C. We don't want to buffer
963 // partial lines concurrent with whole lines, so the correct behavior
964 // is to buffer only block B (out to the newline)
965 assert_eq!(writer.write(content).unwrap(), 11);
966 assert_eq!(writer.get_ref().buffer, *b"AAAAA");
968 writer.flush().unwrap();
969 assert_eq!(writer.get_ref().buffer, *b"AAAAABBBBB\n");
972 /// Same as test_partial_multiline_buffering, but in the event NO full lines
973 /// fit in the buffer, just buffer as much as possible
975 fn partial_multiline_buffering_without_full_line() {
976 let writer = ProgrammableSink {
977 // Write only up to 5 bytes at a time
978 accept_prefix: Some(5),
// NOTE(review): the `max_writes` / `..Default::default()` lines
// (orig. ~979-981) are missing from this extraction.
982 let mut writer = LineWriter::with_capacity(5, writer);
984 let content = b"AAAAABBBBBBBBBB\nCCCCC\nDDDDD";
986 // When content is written, LineWriter will try to write blocks A, B,
987 // and C. Only block A will succeed. Under the old behavior, LineWriter
988 // would then try to buffer B and C, but because its capacity is 5,
989 // it will only be able to buffer part of B. Because it's not possible
990 // for it to buffer any complete lines, it should buffer as much of B as
992 assert_eq!(writer.write(content).unwrap(), 10);
993 assert_eq!(writer.get_ref().buffer, *b"AAAAA");
995 writer.flush().unwrap();
996 assert_eq!(writer.get_ref().buffer, *b"AAAAABBBBB");
// Records every write (as a UTF-8 string) and flush, so tests can assert the
// exact sequence of calls reaching the inner writer.
// NOTE(review): the RecordedEvent variant list (orig. ~1001-1004) and the
// Ok(...) returns inside write/flush are missing from this extraction.
999 #[derive(Debug, Clone, PartialEq, Eq)]
1000 enum RecordedEvent {
1005 #[derive(Debug, Clone, Default)]
1006 struct WriteRecorder {
1007 pub events: Vec<RecordedEvent>,
1010 impl Write for WriteRecorder {
1011 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
1012 use crate::str::from_utf8;
1014 self.events.push(RecordedEvent::Write(from_utf8(buf).unwrap().to_string()));
1018 fn flush(&mut self) -> io::Result<()> {
1019 self.events.push(RecordedEvent::Flush);
1024 /// Test that a normal, formatted writeln only results in a single write
1025 /// call to the underlying writer. A naive implementation of
1026 /// LineWriter::write_all results in two writes: one of the buffered data,
1027 /// and another of the final substring in the formatted set
1029 fn single_formatted_write() {
1030 let writer = WriteRecorder::default();
1031 let mut writer = LineWriter::new(writer);
1033 // Under a naive implementation of LineWriter, this will result in two
1034 // writes: "hello, world" and "!\n", because write() has to flush the
1035 // buffer before attempting to write the last "!\n". write_all shouldn't
1036 // have this limitation.
1037 writeln!(&mut writer, "{}, {}!", "hello", "world").unwrap();
1038 assert_eq!(writer.get_ref().events, [RecordedEvent::Write("hello, world!\n".to_string())]);