/// darwin_fd_limit exists to work around an issue where launchctl on macOS
/// defaults the rlimit maxfiles to 256/unlimited. The default soft limit of 256
/// ends up being far too low for our multithreaded scheduler testing, depending
/// on the number of cores available.
///
/// This fixes issue #7772.
///
/// # Safety
///
/// Calls the raw `libc` functions `sysctl`, `getrlimit`, and `setrlimit`;
/// the pointer arguments passed here are all valid for the duration of the
/// calls, so the only process-wide effect is raising our own soft fd limit.
///
/// # Panics
///
/// Panics if `sysctl`, `getrlimit`, or `setrlimit` reports an error.
#[cfg(any(target_os = "macos", target_os = "ios"))]
#[allow(non_camel_case_types)]
pub unsafe fn raise_fd_limit() {
    use std::cmp;
    use std::io;
    use std::mem::size_of_val;
    use std::ptr::null_mut;

    // sysctl MIB names for the kern.maxfilesperproc value; these are stable
    // constants on Darwin (see sys/sysctl.h).
    static CTL_KERN: libc::c_int = 1;
    static KERN_MAXFILESPERPROC: libc::c_int = 29;

    // The strategy here is to fetch the current resource limits, read the
    // kern.maxfilesperproc sysctl value, and bump the soft resource limit for
    // maxfiles up to the sysctl value.

    // Fetch the kern.maxfilesperproc value
    let mut mib: [libc::c_int; 2] = [CTL_KERN, KERN_MAXFILESPERPROC];
    let mut maxfiles: libc::c_int = 0;
    let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
    if libc::sysctl(
        &mut mib[0],
        mib.len() as libc::c_uint,
        &mut maxfiles as *mut _ as *mut _,
        &mut size,
        null_mut(),
        0,
    ) != 0
    {
        let err = io::Error::last_os_error();
        panic!("raise_fd_limit: error calling sysctl: {}", err);
    }

    // Fetch the current resource limits
    let mut rlim = libc::rlimit {
        rlim_cur: 0,
        rlim_max: 0,
    };
    if libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) != 0 {
        let err = io::Error::last_os_error();
        panic!("raise_fd_limit: error calling getrlimit: {}", err);
    }

    // Make sure we're only ever going to increase the rlimit.
    if rlim.rlim_cur < maxfiles as libc::rlim_t {
        // Bump the soft limit to the smaller of kern.maxfilesperproc and the
        // hard limit (setrlimit rejects a soft limit above the hard limit).
        rlim.rlim_cur = cmp::min(maxfiles as libc::rlim_t, rlim.rlim_max);

        // Set our newly-increased resource limit.
        if libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) != 0 {
            let err = io::Error::last_os_error();
            panic!("raise_fd_limit: error calling setrlimit: {}", err);
        }
    }
}
/// No-op on platforms other than macOS/iOS: only Darwin's launchctl defaults
/// the soft maxfiles rlimit low enough to need raising, so there is nothing
/// to do elsewhere.
///
/// # Safety
///
/// Always safe to call; the body is empty. The function is `unsafe` only to
/// keep the signature identical to the macOS/iOS variant.
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
pub unsafe fn raise_fd_limit() {}