1 //! Buffer management for same-process client<->server communication.
use std::io::{self, Write};
use std::mem;
use std::ops::{Deref, DerefMut};
use std::slice;
/// A growable byte/element buffer with a stable, FFI-safe layout, used to
/// pass data between the proc-macro client and server even when the two
/// sides were compiled by different compilers.
///
/// Allocation and deallocation are funneled through the `reserve`/`drop`
/// function pointers so that memory is always managed by the allocator of
/// the side that created the buffer.
#[repr(C)]
pub struct Buffer<T: Copy> {
    // Raw parts of the owned allocation, mirroring `Vec<T>`'s
    // (pointer, length, capacity) triple.
    data: *mut T,
    len: usize,
    capacity: usize,
    // Grows the buffer to fit at least `additional` more elements,
    // returning the (possibly reallocated) buffer.
    reserve: extern "C" fn(Buffer<T>, usize) -> Buffer<T>,
    // Frees the buffer's allocation on the side that created it.
    drop: extern "C" fn(Buffer<T>),
}
// SAFETY: `Buffer` is an owning handle to its allocation (the raw `data`
// pointer is never shared), so it can be `Sync`/`Send` under the same bounds
// `Vec<T>` would require; the raw pointer merely suppresses the auto traits.
unsafe impl<T: Copy + Sync> Sync for Buffer<T> {}
unsafe impl<T: Copy + Send> Send for Buffer<T> {}
20 impl<T: Copy> Default for Buffer<T> {
21 fn default() -> Self {
26 impl<T: Copy> Deref for Buffer<T> {
28 fn deref(&self) -> &[T] {
29 unsafe { slice::from_raw_parts(self.data as *const T, self.len) }
33 impl<T: Copy> DerefMut for Buffer<T> {
34 fn deref_mut(&mut self) -> &mut [T] {
35 unsafe { slice::from_raw_parts_mut(self.data, self.len) }
39 impl<T: Copy> Buffer<T> {
40 pub(super) fn new() -> Self {
44 pub(super) fn clear(&mut self) {
48 pub(super) fn take(&mut self) -> Self {
52 // We have the array method separate from extending from a slice. This is
53 // because in the case of small arrays, codegen can be more efficient
54 // (avoiding a memmove call). With extend_from_slice, LLVM at least
55 // currently is not able to make that optimization.
56 pub(super) fn extend_from_array<const N: usize>(&mut self, xs: &[T; N]) {
57 if xs.len() > (self.capacity - self.len) {
59 *self = (b.reserve)(b, xs.len());
62 xs.as_ptr().copy_to_nonoverlapping(self.data.add(self.len), xs.len());
67 pub(super) fn extend_from_slice(&mut self, xs: &[T]) {
68 if xs.len() > (self.capacity - self.len) {
70 *self = (b.reserve)(b, xs.len());
73 xs.as_ptr().copy_to_nonoverlapping(self.data.add(self.len), xs.len());
78 pub(super) fn push(&mut self, v: T) {
79 // The code here is taken from Vec::push, and we know that reserve()
80 // will panic if we're exceeding isize::MAX bytes and so there's no need
81 // to check for overflow.
82 if self.len == self.capacity {
84 *self = (b.reserve)(b, 1);
87 *self.data.add(self.len) = v;
93 impl Write for Buffer<u8> {
94 fn write(&mut self, xs: &[u8]) -> io::Result<usize> {
95 self.extend_from_slice(xs);
99 fn write_all(&mut self, xs: &[u8]) -> io::Result<()> {
100 self.extend_from_slice(xs);
104 fn flush(&mut self) -> io::Result<()> {
109 impl<T: Copy> Drop for Buffer<T> {
116 impl<T: Copy> From<Vec<T>> for Buffer<T> {
117 fn from(mut v: Vec<T>) -> Self {
118 let (data, len, capacity) = (v.as_mut_ptr(), v.len(), v.capacity());
121 // This utility function is nested in here because it can *only*
122 // be safely called on `Buffer`s created by *this* `proc_macro`.
123 fn to_vec<T: Copy>(b: Buffer<T>) -> Vec<T> {
125 let Buffer { data, len, capacity, .. } = b;
127 Vec::from_raw_parts(data, len, capacity)
131 extern "C" fn reserve<T: Copy>(b: Buffer<T>, additional: usize) -> Buffer<T> {
132 let mut v = to_vec(b);
133 v.reserve(additional);
137 extern "C" fn drop<T: Copy>(b: Buffer<T>) {
138 mem::drop(to_vec(b));
141 Buffer { data, len, capacity, reserve, drop }