Introduced the dylib extension format and cleaned up the shutdown sequence

This commit is contained in:
2026-01-17 00:23:35 +01:00
parent 1a7230ce9b
commit 6a3c1d5917
18 changed files with 214 additions and 113 deletions

View File

@@ -27,6 +27,7 @@ pub fn pipe(size: usize) -> (Writer, Reader) {
size,
mut read_waker,
mut write_waker,
mut flush_waker,
reader_dropped,
writer_dropped,
// irrelevant if correctly dropped
@@ -37,11 +38,11 @@ pub fn pipe(size: usize) -> (Writer, Reader) {
state: _,
} = *unsafe { Box::from_raw(val as *mut AsyncRingbuffer) };
if !writer_dropped || !reader_dropped {
eprintln!("Pipe dropped in err before reader or writer");
abort()
}
read_waker.drop();
write_waker.drop();
flush_waker.drop();
unsafe { dealloc(start, pipe_layout(size)) }
}
let state = Box::into_raw(Box::new(AsyncRingbuffer {
@@ -52,6 +53,7 @@ pub fn pipe(size: usize) -> (Writer, Reader) {
write_idx: 0,
read_waker: Trigger::empty(),
write_waker: Trigger::empty(),
flush_waker: Trigger::empty(),
reader_dropped: false,
writer_dropped: false,
drop,
@@ -108,18 +110,21 @@ struct AsyncRingbuffer {
write_idx: usize,
read_waker: Trigger,
write_waker: Trigger,
flush_waker: Trigger,
reader_dropped: bool,
writer_dropped: bool,
drop: extern "C" fn(*const ()),
}
impl AsyncRingbuffer {
/// Marks the writer half as dropped and wakes any parked reader so it can
/// re-poll and observe the closed state (EOF / broken pipe) instead of
/// sleeping forever. Frees the shared state once both halves are gone.
fn drop_writer(&mut self) {
// Wake the reader before flipping the flag is not required for soundness
// here (we hold &mut self), but the reader must be woken so a task parked
// in reader_wait re-polls and sees writer_dropped.
self.read_waker.invoke();
self.writer_dropped = true;
if self.reader_dropped {
// Both ends gone: run the type-erased destructor over the shared state.
// NOTE(review): `state` is presumably the raw pointer handed to the
// `extern "C" fn(*const ())` drop callback — confirm against the struct
// definition, which is not fully visible in this chunk.
(self.drop)(self.state)
}
}
fn drop_reader(&mut self) {
self.write_waker.invoke();
self.reader_dropped = true;
if self.writer_dropped {
(self.drop)(self.state)
@@ -134,6 +139,15 @@ impl AsyncRingbuffer {
self.write_waker = Trigger::new(waker.clone());
Poll::Pending
}
/// Parks a flushing writer until the buffer drains (the reader invokes
/// `flush_waker` once it empties the pipe — see the reader's poll path).
///
/// Returns `Poll::Ready(Err(broken_pipe_error()))` immediately when the
/// reader half is already gone; otherwise registers `waker` and returns
/// `Poll::Pending`.
fn flush_wait<T>(&mut self, waker: &Waker) -> Poll<io::Result<T>> {
if self.reader_dropped {
return Poll::Ready(Err(broken_pipe_error()));
}
// Nudge the reader so it makes progress draining the remaining bytes
// and eventually fires `flush_waker`.
self.read_waker.invoke();
// Discard any previously registered flush waker before storing the
// current one; only the latest poll's waker must be woken.
self.flush_waker.drop();
self.flush_waker = Trigger::new(waker.clone());
Poll::Pending
}
fn reader_wait(&mut self, waker: &Waker) -> Poll<io::Result<usize>> {
if self.writer_dropped {
return Poll::Ready(Err(broken_pipe_error()));
@@ -157,6 +171,36 @@ impl AsyncRingbuffer {
}
/// One slot is always kept free: the ring is "full" exactly when advancing
/// the write cursor by one would land on the read cursor.
fn is_full(&self) -> bool {
    let next_write = (self.write_idx + 1) % self.size;
    next_write == self.read_idx
}
/// Empty exactly when both cursors coincide; the reserved-slot scheme keeps
/// this state distinguishable from "full".
fn is_empty(&self) -> bool {
    self.read_idx == self.write_idx
}
/// Reports how much room the writer currently has.
///
/// NOTE(review): the two branches disagree by one. With one slot reserved,
/// the wrapped case (`write_idx < read_idx`) yields `read - write - 1`
/// (the true free byte count), while the unwrapped case yields
/// `size - write + read`, which is one MORE than the true free count
/// (`size - write + read - 1`). The only visible caller, `try_write_all`,
/// compares with `<=`, which compensates in the unwrapped case but makes
/// the wrapped case reject an exactly-fitting write. Confirm which
/// semantics ("free bytes" vs "free bytes + reserved slot") is intended
/// and make the branches consistent.
fn buf_free(&self) -> usize {
let Self { read_idx, write_idx, size, .. } = self;
if write_idx < read_idx { *read_idx - write_idx - 1 } else { size - write_idx + read_idx }
}
/// Copies as much of `buf` as currently fits into the ring, handling the
/// wrap-around at the end of the backing allocation, and returns the number
/// of bytes written. One slot always stays unused so that the full and
/// empty states remain distinguishable (`is_full` / `is_empty`).
///
/// Comment legend below: `w` = write_idx, `r` = read_idx, `s` = size,
/// `b` = buf.len().
fn wrapping_write_unchecked(&mut self, buf: &[u8]) -> usize {
unsafe {
let Self { read_idx, write_idx, size, .. } = *self;
if write_idx < read_idx {
// Non-wrapping backside write w < r <= s
// Free span is w..r-1; cap the copy so w never catches up to r.
let count = buf.len().min(read_idx - write_idx - 1);
self.non_wrapping_write_unchecked(&buf[0..count]);
count
} else if write_idx + buf.len() < size {
// Non-wrapping frontside write r <= w + b < s
// The whole payload fits before the end of the buffer.
self.non_wrapping_write_unchecked(&buf[0..buf.len()]);
buf.len()
} else if read_idx == 0 {
// Frontside write up to origin r=0 < s < w + b
// Stop one byte short of the end so the write cursor parks at s-1
// (full) instead of wrapping onto the read cursor at 0.
self.non_wrapping_write_unchecked(&buf[0..size - write_idx - 1]);
size - write_idx - 1
} else {
// The branch above guarantees b >= s - w, so split_at is in bounds.
let (end, start) = buf.split_at(size - write_idx);
// Wrapping write r < s < w + b
self.non_wrapping_write_unchecked(end);
// After wrapping, at most r-1 bytes fit in front of the read
// cursor (again keeping the reserved slot).
let start_count = start.len().min(read_idx - 1);
self.non_wrapping_write_unchecked(&start[0..start_count]);
end.len() + start_count
}
}
}
}
fn already_closed_error() -> io::Error {
@@ -166,6 +210,12 @@ fn broken_pipe_error() -> io::Error {
io::Error::new(io::ErrorKind::BrokenPipe, "Pipe already closed from other end")
}
/// Error returned by [`Writer::try_write_all`] when a non-blocking,
/// all-or-nothing write cannot complete.
#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub enum SyncWriteError {
// The ring buffer does not currently have room for the whole payload.
BufferFull,
// This writer has already been closed (its state is gone).
AlreadyClosed,
}
/// A binary safe [AsyncWrite] implementor writing to a ringbuffer created by
/// [pipe].
#[repr(C)]
@@ -177,6 +227,17 @@ impl Writer {
None => Err(already_closed_error()),
}
}
/// Attempts to write all of `data` without blocking.
///
/// All-or-nothing: on [`SyncWriteError::BufferFull`] nothing has been
/// written. Returns [`SyncWriteError::AlreadyClosed`] when this writer has
/// already been shut down.
///
/// On success, wakes a reader parked in its poll path via `read_waker` —
/// new data benefits the *reading* end. (Previously this invoked
/// `write_waker`, the writer's own waker, which left a waiting reader
/// asleep; every other producer path, e.g. the async write path, wakes
/// `read_waker`.)
pub fn try_write_all(self: Pin<&mut Self>, data: &[u8]) -> Result<(), SyncWriteError> {
    unsafe {
        let state = self.get_state().map_err(|_| SyncWriteError::AlreadyClosed)?;
        // `buf_free` over-reports by one in the unwrapped case, so `<=`
        // (rather than `<`) keeps this bounds check safe; see `buf_free`.
        if state.buf_free() <= data.len() {
            return Err(SyncWriteError::BufferFull);
        }
        state.wrapping_write_unchecked(data);
        // Data is now available: wake the reader, not the writer.
        state.read_waker.invoke();
        Ok(())
    }
}
}
impl AsyncWrite for Writer {
fn poll_close(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
@@ -194,7 +255,7 @@ impl AsyncWrite for Writer {
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
unsafe {
let data = self.as_mut().get_state()?;
if data.is_empty() { Poll::Ready(Ok(())) } else { data.writer_wait(cx.waker()) }
if data.is_empty() { Poll::Ready(Ok(())) } else { data.flush_wait(cx.waker()) }
}
}
fn poll_write(
@@ -204,33 +265,13 @@ impl AsyncWrite for Writer {
) -> Poll<io::Result<usize>> {
unsafe {
let data = self.as_mut().get_state()?;
let AsyncRingbuffer { write_idx, read_idx, size, .. } = *data;
if !buf.is_empty() && data.is_empty() {
data.read_waker.invoke();
}
if !buf.is_empty() && data.is_full() {
// Writer is blocked
data.writer_wait(cx.waker())
} else if write_idx < read_idx {
// Non-wrapping backside write w < r <= s
let count = buf.len().min(read_idx - write_idx - 1);
data.non_wrapping_write_unchecked(&buf[0..count]);
Poll::Ready(Ok(count))
} else if data.write_idx + buf.len() < size {
// Non-wrapping frontside write r <= w + b < s
data.non_wrapping_write_unchecked(&buf[0..buf.len()]);
Poll::Ready(Ok(buf.len()))
} else if read_idx == 0 {
// Frontside write up to origin r=0 < s < w + b
data.non_wrapping_write_unchecked(&buf[0..size - write_idx - 1]);
Poll::Ready(Ok(size - write_idx - 1))
} else {
let (end, start) = buf.split_at(size - write_idx);
// Wrapping write r < s < w + b
data.non_wrapping_write_unchecked(end);
let start_count = start.len().min(read_idx - 1);
data.non_wrapping_write_unchecked(&start[0..start_count]);
Poll::Ready(Ok(end.len() + start_count))
Poll::Ready(Ok(data.wrapping_write_unchecked(buf)))
}
}
}
@@ -261,7 +302,7 @@ impl AsyncRead for Reader {
if !buf.is_empty() && data.is_full() {
data.write_waker.invoke();
}
if !buf.is_empty() && data.is_empty() {
let poll = if !buf.is_empty() && data.is_empty() {
// Nothing to read, waiting...
data.reader_wait(cx.waker())
} else if read_idx < write_idx {
@@ -280,7 +321,11 @@ impl AsyncRead for Reader {
let start_count = start.len().min(write_idx);
data.non_wrapping_read_unchecked(&mut start[0..start_count]);
Poll::Ready(Ok(end.len() + start_count))
};
if !buf.is_empty() && data.is_empty() {
data.flush_waker.invoke();
}
poll
}
}
}