1 // FIXME: This is a complete copy of `cargo/src/cargo/util/read2.rs`
2 // Consider unifying the read2() in libstd, cargo and this to prevent further code duplication.
4 pub use self::imp::read2;
5 use std::io::{self, Write};
7 use std::process::{Child, Output};
9 pub fn read2_abbreviated(mut child: Child, exclude_from_len: &[String]) -> io::Result<Output> {
// Drains the child's stdout/stderr into size-capped `ProcOutput` buffers and
// returns the exit status plus (possibly abbreviated) captured output.
// `exclude_from_len` patterns are discounted from the length accounting (see
// `ProcOutput::extend`).
10 let mut stdout = ProcOutput::new();
11 let mut stderr = ProcOutput::new();
// Close the child's stdin so it can't block waiting for input from us.
13 drop(child.stdin.take());
// NOTE(review): this extraction is missing lines here (the `imp::read2(` call
// opener and the closure/closing syntax); compare against the full file
// before editing this function.
15 child.stdout.take().unwrap(),
16 child.stderr.take().unwrap(),
17 &mut |is_stdout, data, _| {
// Route each chunk to the matching buffer; the third closure argument
// (EOF flag) is intentionally ignored here.
18 if is_stdout { &mut stdout } else { &mut stderr }.extend(data, exclude_from_len);
// Reap the child to obtain its exit status (also avoids a zombie on unix).
22 let status = child.wait()?;
24 Ok(Output { status, stdout: stdout.into_bytes(), stderr: stderr.into_bytes() })
// Size caps for captured output: once the accounted length exceeds
// HEAD_LEN + TAIL_LEN, only the first HEAD_LEN and last TAIL_LEN bytes are
// kept (see `ProcOutput::extend`).
27 const HEAD_LEN: usize = 160 * 1024;
28 const TAIL_LEN: usize = 256 * 1024;
// Placeholder size charged per excluded-pattern match so output consisting
// only of excluded strings still eventually crosses the threshold. `isize`
// because the per-match adjustment (placeholder minus pattern length) can be
// negative for patterns longer than 32 bytes.
29 const EXCLUDED_PLACEHOLDER_LEN: isize = 32;
// Variants of `ProcOutput` (the enum header itself is not visible in this
// extraction): `Full` holds every byte seen so far plus the signed length
// adjustment accumulated for excluded patterns; `Abbreviated` keeps only the
// first HEAD_LEN bytes, a count of bytes dropped, and a fixed-size tail used
// as a ring buffer for the most recent TAIL_LEN bytes.
32 Full { bytes: Vec<u8>, excluded_len: isize },
33 Abbreviated { head: Vec<u8>, skipped: usize, tail: Box<[u8]> },
// Constructor body: capture starts in `Full` mode with nothing recorded and
// no excluded-length adjustment.
38 ProcOutput::Full { bytes: Vec::new(), excluded_len: 0 }
// Appends `data`, switching from `Full` to `Abbreviated` once the accounted
// length (actual bytes plus excluded-pattern adjustment) exceeds
// HEAD_LEN + TAIL_LEN. In `Abbreviated` mode only the tail ring buffer is
// updated. NOTE(review): several lines are missing from this extraction
// (e.g. the `.count();` that should terminate the `matches` chain and the
// branch/closing braces); verify against the full file before editing.
41 fn extend(&mut self, data: &[u8], exclude_from_len: &[String]) {
42 let new_self = match *self {
43 ProcOutput::Full { ref mut bytes, ref mut excluded_len } => {
44 let old_len = bytes.len();
45 bytes.extend_from_slice(data);
47 // We had problems in the past with tests failing only in some environments,
48 // due to the length of the base path pushing the output size over the limit.
50 // To make those failures deterministic across all environments we ignore known
51 // paths when calculating the string length, while still including the full
52 // path in the output. This could result in some output being larger than the
53 // threshold, but it's better than having nondeterministic failures.
55 // The compiler emitting only excluded strings is addressed by adding a
56 // placeholder size for each excluded segment, which will eventually reach
57 // the configured threshold.
58 for pattern in exclude_from_len {
59 let pattern_bytes = pattern.as_bytes();
60 // We start matching `pattern_bytes - 1` into the previously loaded data,
61 // to account for the fact a pattern might be included across multiple
62 // `extend` calls. Starting from `- 1` avoids double-counting patterns.
63 let matches = (&bytes[(old_len.saturating_sub(pattern_bytes.len() - 1))..])
64 .windows(pattern_bytes.len())
65 .filter(|window| window == &pattern_bytes)
// Each match is charged EXCLUDED_PLACEHOLDER_LEN instead of its real
// length, so the net adjustment below may be negative for long patterns.
67 *excluded_len += matches as isize
68 * (EXCLUDED_PLACEHOLDER_LEN - pattern_bytes.len() as isize);
// Still under the cap (after the excluded-pattern discount): stay `Full`.
71 let new_len = bytes.len();
72 if (new_len as isize + *excluded_len) as usize <= HEAD_LEN + TAIL_LEN {
// Over the cap: split the accumulated bytes into head / skipped-middle /
// tail and switch to `Abbreviated`.
76 let mut head = replace(bytes, Vec::new());
77 let mut middle = head.split_off(HEAD_LEN);
78 let tail = middle.split_off(middle.len() - TAIL_LEN).into_boxed_slice();
79 let skipped = new_len - HEAD_LEN - TAIL_LEN;
80 ProcOutput::Abbreviated { head, skipped, tail }
82 ProcOutput::Abbreviated { ref mut skipped, ref mut tail, .. } => {
83 *skipped += data.len();
84 if data.len() <= TAIL_LEN {
// Ring-buffer update: overwrite the oldest `data.len()` bytes at the
// front, then rotate so the new bytes end up at the end of `tail`.
85 tail[..data.len()].copy_from_slice(data);
86 tail.rotate_left(data.len());
// Chunk larger than the whole tail: keep only its last TAIL_LEN bytes.
88 tail.copy_from_slice(&data[(data.len() - TAIL_LEN)..]);
// Consumes the buffer, producing the final byte stream. Abbreviated output
// is rendered as head + a human-readable skipped-bytes marker + tail.
// NOTE(review): match/closing lines are missing from this extraction.
96 fn into_bytes(self) -> Vec<u8> {
98 ProcOutput::Full { bytes, .. } => bytes,
99 ProcOutput::Abbreviated { mut head, skipped, tail } => {
// `write!` on a Vec<u8> cannot fail, hence the unwrap.
100 write!(&mut head, "\n\n<<<<<< SKIPPED {} BYTES >>>>>>\n\n", skipped).unwrap();
101 head.extend_from_slice(&tail);
// Fallback implementation for targets that are neither unix nor windows:
// no event loop, just blocking sequential reads. Reads stdout to EOF first,
// then stderr — this can deadlock if the child fills the stderr pipe while
// we are still draining stdout, which is acceptable only as a fallback.
// NOTE(review): the `mod imp` header and `fn read2(` opener are missing
// from this extraction.
108 #[cfg(not(any(unix, windows)))]
110 use std::io::{self, Read};
111 use std::process::{ChildStderr, ChildStdout};
114 out_pipe: ChildStdout,
115 err_pipe: ChildStderr,
116 data: &mut dyn FnMut(bool, &mut Vec<u8>, bool),
117 ) -> io::Result<()> {
118 let mut buffer = Vec::new();
119 out_pipe.read_to_end(&mut buffer)?;
// `true` third argument signals EOF to the callback; `true`/`false` first
// argument distinguishes stdout from stderr.
120 data(true, &mut buffer, true);
122 err_pipe.read_to_end(&mut buffer)?;
123 data(false, &mut buffer, true);
// Unix implementation: both pipes are switched to non-blocking mode and
// multiplexed with poll(2), so neither stream can deadlock the other.
// NOTE(review): the `mod imp` header, `fn read2(` opener, loop/branch and
// closing lines are missing from this extraction; in particular `nfds` and
// `errfd` are defined on lines not visible here.
131 use std::io::prelude::*;
133 use std::os::unix::prelude::*;
134 use std::process::{ChildStderr, ChildStdout};
137 mut out_pipe: ChildStdout,
138 mut err_pipe: ChildStderr,
139 data: &mut dyn FnMut(bool, &mut Vec<u8>, bool),
140 ) -> io::Result<()> {
// Return values deliberately ignored: if fcntl fails the reads below still
// work, just possibly blocking. TODO(review): confirm this is intentional.
142 libc::fcntl(out_pipe.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK);
143 libc::fcntl(err_pipe.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK);
146 let mut out_done = false;
147 let mut err_done = false;
148 let mut out = Vec::new();
149 let mut err = Vec::new();
// pollfd is plain-old-data, so zero-initialization is sound here.
151 let mut fds: [libc::pollfd; 2] = unsafe { mem::zeroed() };
152 fds[0].fd = out_pipe.as_raw_fd();
153 fds[0].events = libc::POLLIN;
154 fds[1].fd = err_pipe.as_raw_fd();
155 fds[1].events = libc::POLLIN;
160 // wait for either pipe to become readable using `poll`
161 let r = unsafe { libc::poll(fds.as_mut_ptr(), nfds, -1) };
// EINTR is benign: retry the poll instead of failing the whole read.
163 let err = io::Error::last_os_error();
164 if err.kind() == io::ErrorKind::Interrupted {
170 // Read as much as we can from each pipe, ignoring EWOULDBLOCK or
171 // EAGAIN. If we hit EOF, then this will happen because the underlying
172 // reader will return Ok(0), in which case we'll see `Ok` ourselves. In
173 // this case we flip the other fd back into blocking mode and read
174 // whatever's leftover on that file descriptor.
175 let handle = |res: io::Result<_>| match res {
178 if e.kind() == io::ErrorKind::WouldBlock {
// `handle` returning Ok(true) means the stream reached EOF.
185 if !err_done && fds[errfd].revents != 0 && handle(err_pipe.read_to_end(&mut err))? {
189 data(false, &mut err, err_done);
190 if !out_done && fds[0].revents != 0 && handle(out_pipe.read_to_end(&mut out))? {
// stdout is done: reuse slot 0 to keep polling only stderr.
192 fds[0].fd = err_pipe.as_raw_fd();
196 data(true, &mut out, out_done);
// Windows implementation: child stdout/stderr handles are re-wrapped as
// `miow::NamedPipe`s and driven with overlapped (asynchronous) reads through
// a single I/O completion port, tokens 0/1 distinguishing the two streams.
// NOTE(review): the `mod imp` header, `fn read2(` opener, the `Pipe` struct
// header, loop/branch and closing lines are missing from this extraction;
// verify against the full file before editing.
205 use std::os::windows::prelude::*;
206 use std::process::{ChildStderr, ChildStdout};
209 use miow::iocp::{CompletionPort, CompletionStatus};
210 use miow::pipe::NamedPipe;
211 use miow::Overlapped;
212 use winapi::shared::winerror::ERROR_BROKEN_PIPE;
// `Pipe` fields (struct header not visible): destination buffer and the
// OVERLAPPED state for the in-flight read.
215 dst: &'a mut Vec<u8>,
216 overlapped: Overlapped,
222 out_pipe: ChildStdout,
223 err_pipe: ChildStderr,
224 data: &mut dyn FnMut(bool, &mut Vec<u8>, bool),
225 ) -> io::Result<()> {
226 let mut out = Vec::new();
227 let mut err = Vec::new();
// Token 0 = stdout, token 1 = stderr on the completion port.
229 let port = CompletionPort::new(1)?;
230 port.add_handle(0, &out_pipe)?;
231 port.add_handle(1, &err_pipe)?;
234 let mut out_pipe = Pipe::new(out_pipe, &mut out);
235 let mut err_pipe = Pipe::new(err_pipe, &mut err);
240 let mut status = [CompletionStatus::zero(), CompletionStatus::zero()];
// Pump completions until both streams have reported EOF/broken pipe.
242 while !out_pipe.done || !err_pipe.done {
243 for status in port.get_many(&mut status, None)? {
244 if status.token() == 0 {
245 out_pipe.complete(status);
246 data(true, out_pipe.dst, out_pipe.done);
249 err_pipe.complete(status);
250 data(false, err_pipe.dst, err_pipe.done);
// Takes ownership of the raw handle; unsafe because the handle must be a
// pipe handle valid for overlapped I/O.
261 unsafe fn new<P: IntoRawHandle>(p: P, dst: &'a mut Vec<u8>) -> Pipe<'a> {
264 pipe: NamedPipe::from_raw_handle(p.into_raw_handle()),
265 overlapped: Overlapped::zero(),
// Issues the next overlapped read into the spare capacity of `dst`.
// A broken pipe is treated as EOF, not as an error.
270 unsafe fn read(&mut self) -> io::Result<()> {
271 let dst = slice_to_end(self.dst);
272 match self.pipe.read_overlapped(dst, self.overlapped.raw()) {
275 if e.raw_os_error() == Some(ERROR_BROKEN_PIPE as i32) {
// Commits the bytes a completed read wrote into `dst`'s spare capacity;
// zero bytes transferred marks the stream as finished.
285 unsafe fn complete(&mut self, status: &CompletionStatus) {
286 let prev = self.dst.len();
287 self.dst.set_len(prev + status.bytes_transferred() as usize);
288 if status.bytes_transferred() == 0 {
// Returns the uninitialized spare-capacity region of `v` as a slice,
// growing the vector first when it is empty or full. Unsafe: the caller
// must only treat bytes actually written as initialized (via set_len).
294 unsafe fn slice_to_end(v: &mut Vec<u8>) -> &mut [u8] {
295 if v.capacity() == 0 {
298 if v.capacity() == v.len() {
301 slice::from_raw_parts_mut(v.as_mut_ptr().offset(v.len() as isize), v.capacity() - v.len())