diff --git a/test/BUILD.gn b/test/BUILD.gn
index 60d87acac85dc36d1a56c8889f736c627a0cbc06..b7bf4281a124c2f8dca454421b5c60dcdbc7f8ef 100644
--- a/test/BUILD.gn
+++ b/test/BUILD.gn
@@ -24,6 +24,7 @@ ohos_rust_unittest("rust_ylong_runtime_test_ut") {
     "--cfg=feature=\"sync\"",
     "--cfg=feature=\"signal\"",
     "--cfg=feature=\"time\"",
+    "--cfg=feature=\"aio\"",
   ]

   sources = [ "../ylong_runtime/src/lib.rs" ]
diff --git a/ylong_io/BUILD.gn b/ylong_io/BUILD.gn
index 0cab989b44e5f4c18d4775e14af395dff1ef4010..6611cd7f0ce1faa39691f1652b0b34d3aa74eed3 100644
--- a/ylong_io/BUILD.gn
+++ b/ylong_io/BUILD.gn
@@ -23,6 +23,7 @@ ohos_rust_static_library("ylong_io") {
   features = [
     "tcp",
     "udp",
+    "aio",
   ]

   sources = [ "src/lib.rs" ]
diff --git a/ylong_io/Cargo.toml b/ylong_io/Cargo.toml
index cab5af6812351ffee102780a4501395eb82f15f9..c0fbc98955453c1215c8407c4db1ac42ada3580c 100644
--- a/ylong_io/Cargo.toml
+++ b/ylong_io/Cargo.toml
@@ -8,9 +8,10 @@ repository = "https://gitee.com/openharmony/commonlibrary_rust_ylong_runtime"
 keywords = ["ylong", "io", "epoll"]

 [features]
-default = ["tcp", "udp"]
+default = ["tcp", "udp", "aio"]
 udp = []
 tcp = []
+aio = []

 [dependencies]
 libc = "0.2.134"
diff --git a/ylong_io/src/interest.rs b/ylong_io/src/interest.rs
index 73845295b79b9a509c3236af8fb869863759174f..328385521f6d0168e8c56812d5c2b1fcc394f63b 100644
--- a/ylong_io/src/interest.rs
+++ b/ylong_io/src/interest.rs
@@ -59,6 +59,11 @@ impl Interest {

         io_event as libc::c_uint
     }
+
+    /// Converts the interest into its raw representation as a `usize`.
+    pub fn as_usize(&self) -> usize {
+        self.0.get() as usize
+    }
 }

 impl ops::BitOr for Interest {
diff --git a/ylong_io/src/lib.rs b/ylong_io/src/lib.rs
index 690d446be9c446ffb75bb69844be90e34772daf4..a9d833a86dd44a7a96d02bdfe3e9ea905e8dbf1b 100644
--- a/ylong_io/src/lib.rs
+++ b/ylong_io/src/lib.rs
@@ -27,7 +27,12 @@ pub use sys::{Event, EventTrait, Events, Selector};
 pub use sys::{SocketAddr, UnixDatagram, UnixListener, UnixStream};
 #[cfg(feature = "tcp")]
 pub use sys::{TcpListener, TcpStream};
-
+#[cfg(all(unix, feature = "aio"))]
+pub use sys::{io_get_events, io_submit, io_setup, io_destroy,
+    FileCommand, AioResult, AioContextT, NoopLock, RawMutexTrait,
+    LifetimeExtender, LockedBuf, AtomicNode, LinkedList, RequestMutex,
+    ReadFlags, WriteFlags, AioEventFd, EventFdError, Sink, Stream, StreamExt,
+    Link, RawMutex, PointerOps, Adapter, DefaultLinkOps, LinkedListOps, LinkOps};
 /// unix-specific
 #[cfg(unix)]
 pub mod unix {
diff --git a/ylong_io/src/poll.rs b/ylong_io/src/poll.rs
index a52a944dc1bdfa1b212a53cb4955b9e3b77f58e3..cc1bcf564be9b151b495f513e62e106cf1100593 100644
--- a/ylong_io/src/poll.rs
+++ b/ylong_io/src/poll.rs
@@ -24,6 +24,7 @@ pub struct Poll {
 impl Poll {
     /// Creates a new Poll.
     pub fn new() -> io::Result<Poll> {
+        println!("Poll new");
         Selector::new().map(|selector| Poll {
             selector: { selector },
         })
@@ -43,6 +44,7 @@ impl Poll {
     where
         S: Source + ?Sized,
     {
+        println!("poll register");
         source.register(&self.selector, token, interests)
     }
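Reviewer note on the `Interest::as_usize` accessor added in interest.rs: a quick sketch of how it composes with the existing API. This assumes the crate's existing `READABLE`/`WRITABLE` constants and the `BitOr` impl shown above, and is illustrative only.

```rust
use ylong_io::Interest;

// Combine two interests and inspect the raw event mask as a usize.
let interest = Interest::READABLE | Interest::WRITABLE;
assert_ne!(interest.as_usize(), 0);
```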
diff --git a/ylong_io/src/sys/mod.rs b/ylong_io/src/sys/mod.rs
index b6704091d1adb394d01c136bf467e2b3d2425f95..220d4e4824629c4fbec384464a783954cbfdd366 100644
--- a/ylong_io/src/sys/mod.rs
+++ b/ylong_io/src/sys/mod.rs
@@ -33,6 +33,16 @@ macro_rules! cfg_udp {
     }
 }

+#[cfg(unix)]
+macro_rules! cfg_aio {
+    ($($item:item)*) => {
+        $(
+            #[cfg(feature = "aio")]
+            $item
+        )*
+    }
+}
+
 #[cfg(windows)]
 macro_rules! cfg_net {
     ($($item:item)*) => {
diff --git a/ylong_io/src/sys/unix/aio/aio_event/aio_eventfd.rs b/ylong_io/src/sys/unix/aio/aio_event/aio_eventfd.rs
new file mode 100644
index 0000000000000000000000000000000000000000..edf7322c1039fff37a2226087352eefe932143ca
--- /dev/null
+++ b/ylong_io/src/sys/unix/aio/aio_event/aio_eventfd.rs
@@ -0,0 +1,48 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+use std::io;
+use std::fs::File;
+use std::os::fd::AsRawFd;
+use crate::{Fd, Interest, Selector, Source, Token};
+
+/// Errors that can occur while handling an AIO eventfd.
+#[derive(Debug)]
+pub enum EventFdError {
+    /// Error creating the EventFd
+    CreateError(io::Error),
+    /// Poll error
+    PollError(io::Error),
+    /// Read error
+    ReadError(io::Error),
+}
+
+/// AIO EventFd.
+pub struct AioEventFd {
+    /// AIO operation file
+    pub inner: File,
+}
+
+impl Source for AioEventFd {
+    fn register(&mut self, selector: &Selector, token: Token, interests: Interest) -> std::io::Result<()> {
+        println!("aioeventfd register");
+        selector.register(self.get_fd(), token, interests)
+    }
+
+    fn deregister(&mut self, selector: &Selector) -> std::io::Result<()> {
+        selector.deregister(self.get_fd())
+    }
+
+    fn get_fd(&self) -> Fd {
+        self.inner.as_raw_fd()
+    }
+}
diff --git a/ylong_io/src/sys/unix/aio/aio_event/eventfd_trait.rs b/ylong_io/src/sys/unix/aio/aio_event/eventfd_trait.rs
new file mode 100644
index 0000000000000000000000000000000000000000..955b150a882c96a7617d4c0175630632dd52d7b5
--- /dev/null
+++ b/ylong_io/src/sys/unix/aio/aio_event/eventfd_trait.rs
@@ -0,0 +1,162 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// A stream of values produced asynchronously.
+pub trait Stream {
+    /// Values yielded by the stream.
+    type Item;
+
+    /// Attempt to pull out the next value of this stream, registering the
+    /// current task for wakeup if the value is not yet available, and returning
+    /// `None` if the stream is exhausted.
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>>;
+
+    /// Returns the bounds on the remaining length of the stream.
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (0, None)
+    }
+}
+
+impl<T: ?Sized> StreamExt for T where T: Stream {}
+
+/// An extension trait for `Stream`s that provides a variety of convenient
+/// combinator functions.
+pub trait StreamExt: Stream {
+    /// Creates a future that resolves to the next item in the stream.
+    fn next(&mut self) -> Next<'_, Self>
+    where
+        Self: Unpin,
+    {
+        assert_future::<Option<Self::Item>, _>(Next::new(self))
+    }
+
+    /// A convenience method for calling [`Stream::poll_next`] on [`Unpin`]
+    /// stream types.
+    fn poll_next_unpin(&mut self, cx: &mut Context<'_>) -> Poll<Option<Self::Item>>
+    where
+        Self: Unpin,
+    {
+        Pin::new(self).poll_next(cx)
+    }
+}
+
+pub(crate) fn assert_future<T, F>(future: F) -> F
+where
+    F: Future<Output = T>,
+{
+    future
+}
+
+#[derive(Debug)]
+pub struct Next<'a, St: ?Sized> {
+    stream: &'a mut St,
+}
+
+impl<'a, St: ?Sized + Stream + Unpin> Next<'a, St> {
+    pub(super) fn new(stream: &'a mut St) -> Self {
+        Self { stream }
+    }
+}
+
+impl<St: ?Sized + Stream + Unpin> Future for Next<'_, St> {
+    type Output = Option<St::Item>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        self.stream.poll_next_unpin(cx)
+    }
+}
+
+impl<S: ?Sized + Stream + Unpin> Stream for &mut S {
+    type Item = S::Item;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        S::poll_next(Pin::new(&mut **self), cx)
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (**self).size_hint()
+    }
+}
+
+/// A `Sink` is a value into which other values can be sent, asynchronously.
+pub trait Sink<Item> {
+    /// The type of value produced by the sink when an error occurs.
+    type Error;
+
+    /// Attempts to prepare the `Sink` to receive a value.
+    ///
+    /// This method must be called and return `Poll::Ready(Ok(()))` prior to
+    /// each call to `start_send`.
+    ///
+    /// This method returns `Poll::Ready` once the underlying sink is ready to
+    /// receive data. If this method returns `Poll::Pending`, the current task
+    /// is registered to be notified (via `cx.waker().wake_by_ref()`) when `poll_ready`
+    /// should be called again.
+    ///
+    /// In most cases, if the sink encounters an error, the sink will
+    /// permanently be unable to receive items.
+    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>>;
+
+    /// Begin the process of sending a value to the sink.
+    /// Each call to this function must be preceded by a successful call to
+    /// `poll_ready` which returned `Poll::Ready(Ok(()))`.
+    ///
+    /// As the name suggests, this method only *begins* the process of sending
+    /// the item. If the sink employs buffering, the item isn't fully processed
+    /// until the buffer is fully flushed. Since sinks are designed to work with
+    /// asynchronous I/O, the process of actually writing out the data to an
+    /// underlying object takes place asynchronously. **You *must* use
+    /// `poll_flush` or `poll_close` in order to guarantee completion of a
+    /// send**.
+    ///
+    /// Implementations of `poll_ready` and `start_send` will usually involve
+    /// flushing behind the scenes in order to make room for new messages.
+    /// It is only necessary to call `poll_flush` if you need to guarantee that
+    /// *all* of the items placed into the `Sink` have been sent.
+    ///
+    /// In most cases, if the sink encounters an error, the sink will
+    /// permanently be unable to receive items.
+    fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error>;
+
+    /// Flush any remaining output from this sink.
+    ///
+    /// Returns `Poll::Ready(Ok(()))` when no buffered items remain. If this
+    /// value is returned then it is guaranteed that all previous values sent
+    /// via `start_send` have been flushed.
+    ///
+    /// Returns `Poll::Pending` if there is more work left to do, in which
+    /// case the current task is scheduled (via `cx.waker().wake_by_ref()`) to wake up when
+    /// `poll_flush` should be called again.
+    ///
+    /// In most cases, if the sink encounters an error, the sink will
+    /// permanently be unable to receive items.
+    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>>;
+
+    /// Flush any remaining output and close this sink, if necessary.
+    ///
+    /// Returns `Poll::Ready(Ok(()))` when no buffered items remain and the sink
+    /// has been successfully closed.
+    ///
+    /// Returns `Poll::Pending` if there is more work left to do, in which
+    /// case the current task is scheduled (via `cx.waker().wake_by_ref()`) to wake up when
+    /// `poll_close` should be called again.
+    ///
+    /// If this function encounters an error, the sink should be considered to
+    /// have failed permanently, and no more `Sink` methods should be called.
+    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>>;
+}
\ No newline at end of file
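Reviewer note: since eventfd_trait.rs vendors minimal `Stream`/`StreamExt` traits rather than pulling in the futures crate, here is a hedged sketch of how a caller would implement and drive them. `Counter` is a made-up type, and `Stream`/`StreamExt` are assumed to be in scope via the re-exports.

```rust
use std::pin::Pin;
use std::task::{Context, Poll};

// Hypothetical stream that yields 0, 1, 2 and then terminates.
struct Counter(u32);

impl Stream for Counter {
    type Item = u32;

    fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<u32>> {
        if self.0 < 3 {
            self.0 += 1;
            Poll::Ready(Some(self.0 - 1))
        } else {
            Poll::Ready(None) // stream exhausted
        }
    }
}

// `StreamExt::next` turns each item into an awaitable future.
async fn drain(mut counter: Counter) -> u32 {
    let mut sum = 0;
    while let Some(v) = counter.next().await {
        sum += v;
    }
    sum // 0 + 1 + 2 = 3
}
```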
diff --git a/ylong_io/src/sys/unix/aio/aio_event/mod.rs b/ylong_io/src/sys/unix/aio/aio_event/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..64c9931003e5ac3a40aa56c8f2c6c62ec73a1ded
--- /dev/null
+++ b/ylong_io/src/sys/unix/aio/aio_event/mod.rs
@@ -0,0 +1,17 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+mod aio_eventfd;
+mod eventfd_trait;
+
+pub use aio_eventfd::{AioEventFd, EventFdError};
+pub use eventfd_trait::{Sink, Stream, StreamExt};
\ No newline at end of file
diff --git a/ylong_io/src/sys/unix/aio/aio_kernel.rs b/ylong_io/src/sys/unix/aio/aio_kernel.rs
new file mode 100644
index 0000000000000000000000000000000000000000..2a50b74729f9f5981d18a1588f2d03435106c68a
--- /dev/null
+++ b/ylong_io/src/sys/unix/aio/aio_kernel.rs
@@ -0,0 +1,120 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use libc::{__s16, __s64, __u16, __u32, __u64, c_int, timespec};
+
+// Linux syscall number used to create an AIO context
+const __NR_IO_SETUP: u32 = 206;
+// Linux syscall number used to destroy an AIO context
+const __NR_IO_DESTROY: u32 = 207;
+// Linux syscall number used to retrieve AIO completion events
+const __NR_IO_GET_EVENTS: u32 = 208;
+// Linux syscall number used to submit AIO requests
+const __NR_IO_SUBMIT: u32 = 209;
+
+/// Linux kernel type identifying an AIO context.
+pub type AioContextT = std::os::raw::c_ulong;
+/// Linux kernel type carrying an AIO operation's result.
+pub type AioResult = std::os::raw::c_longlong;
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct IoEvent {
+    pub data: __u64,
+    pub obj: __u64,
+    pub res: __s64,
+    pub res2: __s64,
+}
+
+/// In-kernel AIO control block, mirroring the kernel's `struct iocb`.
+#[repr(C)]
+pub struct iocb {
+    /// User data, copied back into the completion event.
+    pub aio_data: __u64,
+    #[cfg(target_endian = "little")]
+    /// Kernel-internal key; do not modify.
+    pub aio_key: __u32,
+    #[cfg(target_endian = "little")]
+    /// Per-request `RWF_*` flags.
+    pub aio_rw_flags: c_int,
+    #[cfg(target_endian = "big")]
+    /// Per-request `RWF_*` flags.
+    pub aio_rw_flags: c_int,
+    #[cfg(target_endian = "big")]
+    /// Kernel-internal key; do not modify.
+    pub aio_key: __u32,
+    /// Operation code, one of the `IOCB_CMD_*` values.
+    pub aio_lio_opcode: __u16,
+    /// Request priority.
+    pub aio_reqprio: __s16,
+    /// Target file descriptor.
+    pub aio_fildes: __u32,
+    /// Buffer address.
+    pub aio_buf: __u64,
+    /// Buffer length in bytes.
+    pub aio_nbytes: __u64,
+    /// File offset.
+    pub aio_offset: __s64,
+    aio_reserved2: __u64,
+    /// `IOCB_FLAG_*` flags, e.g. `IOCB_FLAG_RESFD`.
+    pub aio_flags: __u32,
+    /// eventfd to signal on completion when `IOCB_FLAG_RESFD` is set.
+    pub aio_resfd: __u32,
+}
+
+/// Initialize an AIO context for a given submission queue size within the kernel.
+///
+/// # Safety
+/// The input parameters must meet the requirements of the Linux kernel.
+#[inline(always)]
+pub unsafe fn io_setup(nr: libc::c_long, ctxp: *mut AioContextT) -> libc::c_long {
+    unsafe { libc::syscall(__NR_IO_SETUP as libc::c_long, nr, ctxp) }
+}
+
+/// Destroy an AIO context.
+///
+/// # Safety
+/// The input parameters must meet the requirements of the Linux kernel.
+#[inline(always)]
+pub unsafe fn io_destroy(ctx: AioContextT) -> libc::c_long {
+    unsafe { libc::syscall(__NR_IO_DESTROY as libc::c_long, ctx) }
+}
+
+/// Submit a batch of IO operations.
+///
+/// # Safety
+/// The input parameters must meet the requirements of the Linux kernel.
+#[inline(always)]
+pub unsafe fn io_submit(
+    ctx: AioContextT,
+    nr: libc::c_long,
+    iocbpp: *mut *mut iocb,
+) -> libc::c_long {
+    unsafe { libc::syscall(__NR_IO_SUBMIT as libc::c_long, ctx, nr, iocbpp) }
+}
+
+/// Retrieve completion events for previously submitted IO requests.
+///
+/// # Safety
+/// The input parameters must meet the requirements of the Linux kernel.
+#[inline(always)]
+pub unsafe fn io_get_events(
+    ctx: AioContextT,
+    min_nr: libc::c_long,
+    max_nr: libc::c_long,
+    events: *mut IoEvent,
+    timeout: *mut timespec,
+) -> libc::c_long {
+    unsafe { libc::syscall(__NR_IO_GET_EVENTS as libc::c_long, ctx, min_nr, max_nr, events, timeout) }
+}
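Reviewer note: a rough end-to-end sketch of the four syscall wrappers above, for readers unfamiliar with Linux native AIO. Error handling is elided, the opcode is written as its numeric value because `IOCB_CMD_PREAD` lives in file_command.rs and is not public, and `iocb` is zero-initialized since its reserved field is private.

```rust
use std::fs::File;
use std::os::unix::io::AsRawFd;

// Submit one read and block until it completes. Sketch only.
unsafe fn aio_read(file: &File, buf: &mut [u8]) -> i64 {
    // Create a context with room for a single in-flight request.
    let mut ctx: AioContextT = 0;
    assert_eq!(io_setup(1, &mut ctx), 0);

    // Fill in a control block for a positioned read at offset 0.
    let mut cb: iocb = std::mem::zeroed();
    cb.aio_lio_opcode = 0; // IOCB_CMD_PREAD
    cb.aio_fildes = file.as_raw_fd() as u32;
    cb.aio_buf = buf.as_mut_ptr() as u64;
    cb.aio_nbytes = buf.len() as u64;

    let mut cbs = [&mut cb as *mut iocb];
    assert_eq!(io_submit(ctx, 1, cbs.as_mut_ptr()), 1);

    // Reap exactly one completion event, waiting indefinitely.
    let mut event: IoEvent = std::mem::zeroed();
    io_get_events(ctx, 1, 1, &mut event, std::ptr::null_mut());
    io_destroy(ctx);
    event.res // bytes read, or a negative errno
}
```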
diff --git a/ylong_io/src/sys/unix/aio/atomic_node.rs b/ylong_io/src/sys/unix/aio/atomic_node.rs
new file mode 100644
index 0000000000000000000000000000000000000000..9fd664d6bf05993a8da57884e473377a54892eb9
--- /dev/null
+++ b/ylong_io/src/sys/unix/aio/atomic_node.rs
@@ -0,0 +1,352 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::cell::Cell;
+use std::fmt;
+use std::ptr::NonNull;
+use std::sync::atomic::{AtomicBool, Ordering};
+use crate::sys::{Adapter, DefaultLinkOps, LinkedListOps, LinkOps, PointerOps};
+
+/// Node for intrusive collections, suitable for multi-threaded environments.
+pub struct AtomicNode {
+    locked: AtomicBool,
+    next: Cell<Option<NonNull<AtomicNode>>>,
+    prev: Cell<Option<NonNull<AtomicNode>>>,
+}
+unsafe impl Sync for AtomicNode {}
+
+const UNLINKED_MARKER: Option<NonNull<AtomicNode>> =
+    unsafe { Some(NonNull::new_unchecked(1 as *mut AtomicNode)) };
+
+impl AtomicNode {
+    /// Creates a new `Node`.
+    #[inline]
+    pub fn new() -> AtomicNode {
+        AtomicNode {
+            locked: AtomicBool::new(false),
+            next: Cell::new(UNLINKED_MARKER),
+            prev: Cell::new(UNLINKED_MARKER),
+        }
+    }
+}
+
+impl DefaultLinkOps for AtomicNode {
+    type Ops = AtomicNodeOps;
+    const NEW: Self::Ops = AtomicNodeOps;
+}
+unsafe impl Send for AtomicNode {}
+impl Clone for AtomicNode {
+    #[inline]
+    fn clone(&self) -> Self {
+        AtomicNode::new()
+    }
+}
+impl Default for AtomicNode {
+    #[inline]
+    fn default() -> Self {
+        AtomicNode::new()
+    }
+}
+
+impl fmt::Debug for AtomicNode {
+    #[inline]
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if self.locked.load(Ordering::Relaxed) {
+            write!(f, "linked")
+        } else {
+            write!(f, "unlinked")
+        }
+    }
+}
+
+#[derive(Clone, Copy, Default, Debug)]
+pub struct AtomicNodeOps;
+
+unsafe impl LinkOps for AtomicNodeOps {
+    type LinkPtr = NonNull<AtomicNode>;
+    #[inline]
+    unsafe fn acquire_link(&mut self, ptr: Self::LinkPtr) -> bool {
+        !ptr.as_ref().locked.swap(true, Ordering::Acquire)
+    }
+    #[inline]
+    unsafe fn release_link(&mut self, ptr: Self::LinkPtr) {
+        ptr.as_ref().locked.store(false, Ordering::Release)
+    }
+}
+
+unsafe impl LinkedListOps for AtomicNodeOps {
+    #[inline]
+    unsafe fn next(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr> {
+        ptr.as_ref().next.get()
+    }
+    #[inline]
+    unsafe fn prev(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr> {
+        ptr.as_ref().prev.get()
+    }
+    #[inline]
+    unsafe fn set_next(&mut self, ptr: Self::LinkPtr, next: Option<Self::LinkPtr>) {
+        ptr.as_ref().next.set(next)
+    }
+    #[inline]
+    unsafe fn set_prev(&mut self, ptr: Self::LinkPtr, prev: Option<Self::LinkPtr>) {
+        ptr.as_ref().prev.set(prev)
+    }
+}
+/// An intrusive doubly-linked list.
+///
+/// When this collection is dropped, all elements linked into it will be
+/// converted back to owned pointers and dropped.
+pub struct LinkedList<A: Adapter>
+where
+    A::LinkOps: LinkedListOps,
+{
+    head: Option<<A::LinkOps as LinkOps>::LinkPtr>,
+    tail: Option<<A::LinkOps as LinkOps>::LinkPtr>,
+    adapter: A,
+}
+
+pub struct CursorMut<'a, A: Adapter>
+where
+    A::LinkOps: LinkedListOps,
+{
+    current: Option<<A::LinkOps as LinkOps>::LinkPtr>,
+    list: &'a mut LinkedList<A>,
+}
+
+impl<'a, A: Adapter> CursorMut<'a, A>
+where
+    A::LinkOps: LinkedListOps,
+{
+    #[inline]
+    fn move_next(&mut self) {
+        if let Some(current) = self.current {
+            self.current = unsafe { self.list.adapter.link_ops().next(current) };
+        } else {
+            self.current = self.list.head;
+        }
+    }
+    #[inline]
+    fn insert_before(&mut self, val: <A::PointerOps as PointerOps>::Pointer) {
+        unsafe {
+            let new = self.list.node_from_value(val);
+
+            let link_ops = self.list.adapter.link_ops_mut();
+
+            if let Some(current) = self.current {
+                link_before(link_ops, new, current);
+            } else {
+                link_between(link_ops, new, self.list.tail, None);
+                self.list.tail = Some(new);
+            }
+            if self.list.head == self.current {
+                self.list.head = Some(new);
+            }
+        }
+    }
+    /// Removes the current element from the `LinkedList`.
+    ///
+    /// A pointer to the element that was removed is returned, and the cursor is
+    /// moved to point to the next element in the `LinkedList`.
+    ///
+    /// If the cursor is currently pointing to the null object then no element
+    /// is removed and `None` is returned.
+    #[inline]
+    pub fn remove(&mut self) -> Option<<A::PointerOps as PointerOps>::Pointer> {
+        unsafe {
+            if let Some(current) = self.current {
+                if self.list.head == self.current {
+                    self.list.head = self.list.adapter.link_ops().next(current);
+                }
+                if self.list.tail == self.current {
+                    self.list.tail = self.list.adapter.link_ops().prev(current);
+                }
+                let next = self.list.adapter.link_ops().next(current);
+                let result = current;
+                remove(self.list.adapter.link_ops_mut(), current);
+                self.current = next;
+                Some(
+                    self.list
+                        .adapter
+                        .pointer_ops()
+                        .value_to_pointer(self.list.adapter.get_value(result)),
+                )
+            } else {
+                None
+            }
+        }
+    }
+}
+
+impl<A: Adapter> LinkedList<A>
+where
+    A::LinkOps: LinkedListOps,
+{
+    #[inline]
+    fn node_from_value(
+        &mut self,
+        val: <A::PointerOps as PointerOps>::Pointer,
+    ) -> <A::LinkOps as LinkOps>::LinkPtr {
+        unsafe {
+            let raw = self.adapter.pointer_ops().pointer_to_value(val);
+            let link = self.adapter.get_link(raw);
+
+            if !self.adapter.link_ops_mut().acquire_link(link) {
+                // Convert the node back into an owned pointer so it is dropped.
+                self.adapter.pointer_ops().value_to_pointer(raw);
+
+                panic!("attempted to insert an object that is already linked");
+            }
+
+            link
+        }
+    }
+
+    /// Creates an empty LinkedList.
+    #[inline]
+    pub fn new(adapter: A) -> LinkedList<A> {
+        LinkedList {
+            head: None,
+            tail: None,
+            adapter,
+        }
+    }
+
+    /// Removes all elements from the `LinkedList`.
+    ///
+    /// This will unlink all objects currently in the list, which requires
+    /// iterating through all elements in the `LinkedList`. Each element is
+    /// converted back to an owned pointer and then dropped.
+    #[inline]
+    pub fn clear(&mut self) {
+        let mut current = self.head;
+        self.head = None;
+        self.tail = None;
+        while let Some(x) = current {
+            unsafe {
+                let next = self.adapter.link_ops().next(x);
+                self.adapter.link_ops_mut().release_link(x);
+                self.adapter
+                    .pointer_ops()
+                    .value_to_pointer(self.adapter.get_value(x));
+                current = next;
+            }
+        }
+    }
+
+    /// Returns a null `CursorMut` for this list.
+    #[inline]
+    pub fn cursor_mut(&mut self) -> CursorMut<'_, A> {
+        CursorMut {
+            current: None,
+            list: self,
+        }
+    }
+
+    /// Returns a `CursorMut` pointing to the first element of the list. If
+    /// the list is empty then a null cursor is returned.
+    #[inline]
+    pub fn front_mut(&mut self) -> CursorMut<'_, A> {
+        let mut cursor = self.cursor_mut();
+        cursor.move_next();
+        cursor
+    }
+
+    /// Inserts a new element at the end of the `LinkedList`.
+    #[inline]
+    pub fn push_back(&mut self, val: <A::PointerOps as PointerOps>::Pointer) {
+        self.cursor_mut().insert_before(val);
+    }
+
+    /// Removes the first element of the `LinkedList` and returns it, or
+    /// `None` if the list is empty.
+    #[inline]
+    pub fn pop_front(&mut self) -> Option<<A::PointerOps as PointerOps>::Pointer> {
+        self.front_mut().remove()
+    }
+
+    /// Creates a `CursorMut` from a pointer to an element.
+    ///
+    /// # Safety
+    ///
+    /// `ptr` must be a pointer to an object that is part of this list.
+    #[inline]
+    pub unsafe fn cursor_mut_from_ptr(
+        &mut self,
+        ptr: *const <A::PointerOps as PointerOps>::Value,
+    ) -> CursorMut<'_, A> {
+        CursorMut {
+            current: Some(self.adapter.get_link(ptr)),
+            list: self,
+        }
+    }
+}
+
+unsafe impl<A: Adapter> Sync for LinkedList<A>
+where
+    <A::PointerOps as PointerOps>::Value: Sync,
+    A::LinkOps: LinkedListOps,
+{
+}
+
+unsafe impl<A: Adapter> Send for LinkedList<A>
+where
+    <A::PointerOps as PointerOps>::Pointer: Send,
+    A::LinkOps: LinkedListOps,
+{
+}
+#[inline]
+unsafe fn remove<T: LinkedListOps>(link_ops: &mut T, ptr: T::LinkPtr) {
+    let prev = link_ops.prev(ptr);
+    let next = link_ops.next(ptr);
+
+    if let Some(next) = next {
+        link_ops.set_prev(next, prev);
+    }
+    if let Some(prev) = prev {
+        link_ops.set_next(prev, next);
+    }
+    link_ops.release_link(ptr);
+}
+
+impl<A: Adapter> Drop for LinkedList<A>
+where
+    A::LinkOps: LinkedListOps,
+{
+    #[inline]
+    fn drop(&mut self) {
+        self.clear();
+    }
+}
+
+#[inline]
+unsafe fn link_before<T: LinkedListOps>(link_ops: &mut T, ptr: T::LinkPtr, next: T::LinkPtr) {
+    link_between(link_ops, ptr, link_ops.prev(next), Some(next));
+}
+#[inline]
+unsafe fn link_between<T: LinkedListOps>(
+    link_ops: &mut T,
+    ptr: T::LinkPtr,
+    prev: Option<T::LinkPtr>,
+    next: Option<T::LinkPtr>,
+) {
+    if let Some(prev) = prev {
+        link_ops.set_next(prev, Some(ptr));
+    }
+    if let Some(next) = next {
+        link_ops.set_prev(next, Some(ptr));
+    }
+    link_ops.set_next(ptr, next);
+    link_ops.set_prev(ptr, prev);
+}
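Reviewer note: this diff does not carry an adapter-generating macro, so wiring a type into the intrusive `LinkedList` means implementing `PointerOps` and `Adapter` by hand. A hedged sketch, assuming the items above are in scope; `Entry`, `BoxOps` and `EntryAdapter` are illustrative names, and `#[repr(C)]` keeps the node at a known offset so the pointer casts are valid.

```rust
use std::ptr::NonNull;

// Hypothetical element type; the node must be the first field for the
// casts below, hence #[repr(C)].
#[repr(C)]
struct Entry {
    node: AtomicNode,
    value: u32,
}

// Owns elements as Box<Entry>.
struct BoxOps;
unsafe impl PointerOps for BoxOps {
    type Value = Entry;
    type Pointer = Box<Entry>;

    unsafe fn value_to_pointer(&self, value: *const Entry) -> Box<Entry> {
        Box::from_raw(value as *mut Entry)
    }

    fn pointer_to_value(&self, ptr: Box<Entry>) -> *const Entry {
        Box::into_raw(ptr)
    }
}

struct EntryAdapter {
    link_ops: <AtomicNode as DefaultLinkOps>::Ops,
    pointer_ops: BoxOps,
}

unsafe impl Adapter for EntryAdapter {
    type LinkOps = <AtomicNode as DefaultLinkOps>::Ops;
    type PointerOps = BoxOps;

    unsafe fn get_value(&self, link: NonNull<AtomicNode>) -> *const Entry {
        // `node` is the first field of the #[repr(C)] struct.
        link.as_ptr() as *const Entry
    }

    unsafe fn get_link(&self, value: *const Entry) -> NonNull<AtomicNode> {
        NonNull::new_unchecked(&(*value).node as *const AtomicNode as *mut AtomicNode)
    }

    fn link_ops(&self) -> &Self::LinkOps { &self.link_ops }
    fn link_ops_mut(&mut self) -> &mut Self::LinkOps { &mut self.link_ops }
    fn pointer_ops(&self) -> &Self::PointerOps { &self.pointer_ops }
}

fn demo() {
    let mut list = LinkedList::new(EntryAdapter {
        link_ops: <AtomicNode as DefaultLinkOps>::NEW,
        pointer_ops: BoxOps,
    });
    list.push_back(Box::new(Entry { node: AtomicNode::new(), value: 7 }));
    assert_eq!(list.pop_front().unwrap().value, 7);
}
```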
diff --git a/ylong_io/src/sys/unix/aio/file_command.rs b/ylong_io/src/sys/unix/aio/file_command.rs
new file mode 100644
index 0000000000000000000000000000000000000000..3875b71b98bc3fdadd72e9df41d290c0cb1ac7a0
--- /dev/null
+++ b/ylong_io/src/sys/unix/aio/file_command.rs
@@ -0,0 +1,120 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::sys::unix::aio::lockedbuf::locked_buf::{LifetimeExtender, LockedBuf};
+
+const IOCB_CMD_PREAD: u32 = 0;
+const IOCB_CMD_PWRITE: u32 = 1;
+const IOCB_CMD_FSYNC: u32 = 2;
+const IOCB_CMD_FDSYNC: u32 = 3;
+
+/// Raw AIO command
+pub enum FileCommand<'a> {
+    /// Read
+    Read {
+        /// Offset
+        offset: u64,
+        /// Buffer
+        buffer: &'a mut LockedBuf,
+        /// Read flags
+        flags: u32,
+        /// Optional len
+        len: u64,
+    },
+    /// Write
+    Write {
+        /// Offset
+        offset: u64,
+        /// Buffer
+        buffer: &'a LockedBuf,
+        /// Write flags
+        flags: u32,
+        /// Optional len
+        len: u64,
+    },
+    /// Sync data only
+    Fdsync,
+    /// Sync data and metadata
+    Fsync,
+}
+impl<'a> FileCommand<'a> {
+    /// I/O operation type
+    pub fn operation_code(&self) -> u32 {
+        match self {
+            // The IOCB_CMD_* constants select the asynchronous I/O operation type.
+            FileCommand::Read { .. } => IOCB_CMD_PREAD,
+            FileCommand::Write { .. } => IOCB_CMD_PWRITE,
+            FileCommand::Fdsync => IOCB_CMD_FDSYNC,
+            FileCommand::Fsync => IOCB_CMD_FSYNC,
+        }
+    }
+    /// I/O operation file offset
+    pub fn offset(&self) -> Option<u64> {
+        match *self {
+            FileCommand::Read { offset, .. } => Some(offset),
+            FileCommand::Write { offset, .. } => Some(offset),
+            FileCommand::Fdsync => None,
+            FileCommand::Fsync => None,
+        }
+    }
+    /// I/O operation data length
+    pub fn len(&self) -> Option<u64> {
+        match *self {
+            FileCommand::Read { len, .. } => Some(len),
+            FileCommand::Write { len, .. } => Some(len),
+            FileCommand::Fdsync => None,
+            FileCommand::Fsync => None,
+        }
+    }
+
+    /// Whether the I/O operation's data length is zero
+    pub fn is_empty(&self) -> Option<bool> {
+        match *self {
+            FileCommand::Read { len, .. } => Some(len == 0),
+            FileCommand::Write { len, .. } => Some(len == 0),
+            FileCommand::Fdsync => None,
+            FileCommand::Fsync => None,
+        }
+    }
+
+    /// I/O operation buffer address and length
+    pub fn buffer_addr(&self) -> Option<(u64, u64)> {
+        match self {
+            FileCommand::Read { buffer, .. } => Some(buffer.aio_addr_and_len()),
+            FileCommand::Write { buffer, .. } => Some(buffer.aio_addr_and_len()),
+            FileCommand::Fdsync => None,
+            FileCommand::Fsync => None,
+        }
+    }
+    /// I/O operation flag code
+    pub fn flags(&self) -> Option<u32> {
+        match self {
+            FileCommand::Read { flags, .. } => Some(*flags),
+            FileCommand::Write { flags, .. } => Some(*flags),
+            FileCommand::Fdsync => None,
+            FileCommand::Fsync => None,
+        }
+    }
+    /// Extends the lifetime of the I/O operation buffer
+    pub fn lifetime_extender(&self) -> Option<LifetimeExtender> {
+        match self {
+            FileCommand::Read { buffer, .. } => Some(buffer.lifetime_extender()),
+            FileCommand::Write { buffer, .. } => Some(buffer.lifetime_extender()),
+            FileCommand::Fdsync => None,
+            FileCommand::Fsync => None,
+        }
+    }
+}
\ No newline at end of file
diff --git a/ylong_io/src/sys/unix/aio/flag.rs b/ylong_io/src/sys/unix/aio/flag.rs
new file mode 100644
index 0000000000000000000000000000000000000000..0321a24b2009678677fdaa6bd20484d4c21ed738
--- /dev/null
+++ b/ylong_io/src/sys/unix/aio/flag.rs
@@ -0,0 +1,61 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// per-IO O_DSYNC
+const RWF_DSYNC: u32 = 0x2;
+
+// per-IO O_SYNC
+const RWF_SYNC: u32 = 0x4;
+
+// per-IO O_APPEND
+const RWF_APPEND: u32 = 0x10;
+
+// high priority request, poll if possible
+const RWF_HIPRI: u32 = 0x1;
+
+// per-IO, return -EAGAIN if operation would block
+const RWF_NOWAIT: u32 = 0x8;
+
+/// AIO write flags.
+pub enum WriteFlags {
+    /// Append data to the end of the file.
+    Append = RWF_APPEND as isize,
+    /// Write operation completes according to the requirement
+    /// of synchronized I/O data integrity.
+    Dsync = RWF_DSYNC as isize,
+    /// High priority request, poll if possible
+    Hipri = RWF_HIPRI as isize,
+    /// Don't wait if the I/O will block for operations
+    /// such as file block allocations, dirty page flush,
+    /// mutex locks, or a congested block device inside the
+    /// kernel.
+    Nowait = RWF_NOWAIT as isize,
+    /// Write operation completes according to the requirement
+    /// of synchronized I/O file integrity.
+    Sync = RWF_SYNC as isize,
+    /// Empty
+    Empty = 0,
+}
+
+/// AIO read flags.
+pub enum ReadFlags {
+    /// High priority request, poll if possible
+    Hipri = RWF_HIPRI as isize,
+    /// Don't wait if the I/O will block for operations
+    /// such as file block allocations, dirty page flush,
+    /// mutex locks, or a congested block device inside the
+    /// kernel.
+    Nowait = RWF_NOWAIT as isize,
+    /// Empty
+    Empty = 0,
+}
\ No newline at end of file
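Reviewer note: a small sketch of how `FileCommand` and the flag enums compose. `LockedBuf` comes from the lockedbuf module, which is not part of this diff, so obtaining one is assumed; the assertions just restate the accessors defined in file_command.rs.

```rust
// Describe a 4 KiB data-integrity write at offset 0 (illustrative only).
fn describe_write(buffer: &LockedBuf) {
    let cmd = FileCommand::Write {
        offset: 0,
        buffer,
        flags: WriteFlags::Dsync as u32,
        len: 4096,
    };
    assert_eq!(cmd.operation_code(), 1); // IOCB_CMD_PWRITE
    assert_eq!(cmd.offset(), Some(0));
    assert_eq!(cmd.flags(), Some(WriteFlags::Dsync as u32));
}
```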
diff --git a/ylong_io/src/sys/unix/aio/intrusive_adapter/intrusive_trait.rs b/ylong_io/src/sys/unix/aio/intrusive_adapter/intrusive_trait.rs
new file mode 100644
index 0000000000000000000000000000000000000000..fa4f2e4d51ff0b04bfbe080f241f17441f8baa86
--- /dev/null
+++ b/ylong_io/src/sys/unix/aio/intrusive_adapter/intrusive_trait.rs
@@ -0,0 +1,173 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/// Base trait for link operations.
+///
+/// `LinkPtr` is the representation of a link pointer.
+/// Typically this is `NonNull`, but compact representations such
+/// as `u8` or `u16` are possible.
+///
+/// # Safety
+/// It is necessary to ensure that `ptr` is a valid pointer.
+pub unsafe trait LinkOps {
+    /// The link pointer type.
+    type LinkPtr: Copy + Eq;
+    /// Attempts to acquire ownership of a link so that it can be used in an
+    /// intrusive collection.
+    ///
+    /// # Safety
+    /// If this function succeeds then the intrusive collection will have
+    /// exclusive access to the link until `release_link` is called.
+    unsafe fn acquire_link(&mut self, ptr: Self::LinkPtr) -> bool;
+    /// Releases ownership of a link that was previously acquired with `acquire_link`.
+    ///
+    /// # Safety
+    /// An implementation of `release_link` must not panic.
+    unsafe fn release_link(&mut self, ptr: Self::LinkPtr);
+}
+/// The default implementation of `LinkOps` associated with a link type.
+pub trait DefaultLinkOps {
+    /// The default link operations.
+    type Ops: LinkOps + Default;
+
+    /// The associated constant that represents `Ops::default()`.
+    ///
+    /// This exists because `Default::default()` is not a constant function.
+    const NEW: Self::Ops;
+}
+
+/// Link operations for `LinkedList`.
+/// # Safety
+/// It is necessary to ensure that `ptr` is a valid pointer.
+pub unsafe trait LinkedListOps: LinkOps {
+    /// Returns the "next" link pointer of `ptr`.
+    ///
+    /// # Safety
+    /// An implementation of `next` must not panic.
+    unsafe fn next(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr>;
+
+    /// Returns the "prev" link pointer of `ptr`.
+    ///
+    /// # Safety
+    /// An implementation of `prev` must not panic.
+    unsafe fn prev(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr>;
+
+    /// Sets the "next" link pointer of `ptr`.
+    ///
+    /// # Safety
+    /// An implementation of `set_next` must not panic.
+    unsafe fn set_next(&mut self, ptr: Self::LinkPtr, next: Option<Self::LinkPtr>);
+
+    /// Sets the "prev" link pointer of `ptr`.
+    ///
+    /// # Safety
+    /// An implementation of `set_prev` must not panic.
+    unsafe fn set_prev(&mut self, ptr: Self::LinkPtr, prev: Option<Self::LinkPtr>);
+}
+
+/// Trait for pointer conversion operations.
+///
+/// `Value` is the actual object type managed by the collection. This type will
+/// typically have a link as a struct field.
+///
+/// `Pointer` is a pointer type which "owns" an object of type `Value`.
+/// Operations which insert an element into an intrusive collection will accept
+/// such a pointer and operations which remove an element will return this type.
+///
+/// # Safety
+/// It is necessary to ensure that `value` is a valid raw pointer pointing
+/// to a valid `Value` instance.
+pub unsafe trait PointerOps {
+    /// Object type which is inserted into an intrusive collection.
+    type Value: ?Sized;
+    /// Pointer type which owns an instance of a value.
+    type Pointer;
+
+    /// Constructs an owned pointer from a raw pointer.
+    ///
+    /// # Safety
+    /// The raw pointer must have been previously returned by `pointer_to_value`.
+    ///
+    /// An implementation of `value_to_pointer` must not panic.
+    unsafe fn value_to_pointer(&self, value: *const Self::Value) -> Self::Pointer;
+
+    /// Consumes the owned pointer and returns a raw pointer to the owned object.
+    fn pointer_to_value(&self, ptr: Self::Pointer) -> *const Self::Value;
+}
+
+/// Trait for an adapter which allows a type to be inserted into an intrusive
+/// collection.
+///
+/// `LinkOps` implements the collection-specific operations which
+/// allow an object to be inserted into an intrusive collection. This type
+/// needs to implement the appropriate trait for the collection type
+/// (eg. `LinkedListOps` for inserting into a `LinkedList`).
+/// The `LinkOps` type may be stateful, allowing custom link types.
+///
+/// `PointerOps` implements the collection-specific pointer conversions which
+/// allow an object to be inserted into an intrusive collection.
+/// The `PointerOps` type may be stateful, allowing custom pointer types.
+///
+/// A single object type may have multiple adapters, which allows it to be part
+/// of multiple intrusive collections simultaneously.
+///
+/// In most cases you do not need to implement this trait manually: the
+/// `intrusive_adapter!` macro will generate the necessary implementation for a
+/// given type and its link field. However it is possible to implement it
+/// manually if the intrusive link is not a direct field of the object type.
+///
+/// It is also possible to create stateful adapters. This allows links and
+/// containers to be separated and avoids the need for objects to be modified
+/// to contain a link.
+/// +/// # Safety +/// +/// It must be possible to get back a reference to the container by passing a +/// pointer returned by `get_link` to `get_container`. +pub unsafe trait Adapter { + /// Collection-specific link operations which allow an object to be inserted in + /// an intrusive collection. + type LinkOps: LinkOps; + /// Collection-specific pointer conversions which allow an object to + /// be inserted in an intrusive collection. + type PointerOps: PointerOps; + + /// Gets a reference to an object from a reference to a link in that object. + /// + /// # Safety + /// + /// `link` must be a valid pointer previously returned by `get_link`. + unsafe fn get_value( + &self, + link: ::LinkPtr, + ) -> *const ::Value; + + /// Gets a reference to the link for the given object. + /// + /// # Safety + /// + /// `value` must be a valid pointer. + unsafe fn get_link( + &self, + value: *const ::Value, + ) -> ::LinkPtr; + + /// Returns a reference to the link operations. + fn link_ops(&self) -> &Self::LinkOps; + + /// Returns a reference to the mutable link operations. + fn link_ops_mut(&mut self) -> &mut Self::LinkOps; + + /// Returns a reference to the pointer converter. + fn pointer_ops(&self) -> &Self::PointerOps; +} \ No newline at end of file diff --git a/ylong_io/src/sys/unix/aio/intrusive_adapter/link.rs b/ylong_io/src/sys/unix/aio/intrusive_adapter/link.rs new file mode 100644 index 0000000000000000000000000000000000000000..987a1e05683539e9fa4d6fd8ff0df11b03be7b1b --- /dev/null +++ b/ylong_io/src/sys/unix/aio/intrusive_adapter/link.rs @@ -0,0 +1,137 @@ +// Copyright (c) 2023 Huawei Device Co., Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::cell::Cell; +use std::fmt; +use std::ptr::NonNull; +use crate::sys::unix::aio::intrusive_adapter::intrusive_trait::{DefaultLinkOps, LinkedListOps}; +use crate::sys::unix::aio::intrusive_adapter::intrusive_trait::LinkOps as intrusive_ops; + +/// Intrusive link that allows an object to be inserted into a +/// `LinkedList`. +#[repr(align(2))] +pub struct Link { + next: Cell>>, + prev: Cell>>, +} +const UNLINKED_MARKER: Option> = + unsafe { Some(NonNull::new_unchecked(1 as *mut Link)) }; + +impl Link { + /// Creates a new `Link`. + #[inline] + pub const fn new() -> Link { + Link { + next: Cell::new(UNLINKED_MARKER), + prev: Cell::new(UNLINKED_MARKER), + } + } + + /// Checks whether the `Link` is linked into a `LinkedList`. + #[inline] + pub fn is_linked(&self) -> bool { + self.next.get() != UNLINKED_MARKER + } + + /// Forcibly unlinks an object from a `LinkedList`. + /// + /// # Safety + /// + /// It is undefined behavior to call this function while still linked into a + /// `LinkedList`. The only situation where this function is useful is + /// after calling `fast_clear` on a `LinkedList`, since this clears + /// the collection without marking the nodes as unlinked. 
+    #[inline]
+    pub unsafe fn force_unlink(&self) {
+        self.next.set(UNLINKED_MARKER);
+    }
+}
+
+#[derive(Clone, Copy, Default)]
+pub struct LinkOps;
+
+unsafe impl intrusive_ops for LinkOps {
+    type LinkPtr = NonNull<Link>;
+
+    #[inline]
+    unsafe fn acquire_link(&mut self, ptr: Self::LinkPtr) -> bool {
+        if ptr.as_ref().is_linked() {
+            false
+        } else {
+            ptr.as_ref().next.set(None);
+            true
+        }
+    }
+
+    #[inline]
+    unsafe fn release_link(&mut self, ptr: Self::LinkPtr) {
+        ptr.as_ref().next.set(UNLINKED_MARKER);
+    }
+}
+
+unsafe impl LinkedListOps for LinkOps {
+    #[inline]
+    unsafe fn next(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr> {
+        ptr.as_ref().next.get()
+    }
+
+    #[inline]
+    unsafe fn prev(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr> {
+        ptr.as_ref().prev.get()
+    }
+
+    #[inline]
+    unsafe fn set_next(&mut self, ptr: Self::LinkPtr, next: Option<Self::LinkPtr>) {
+        ptr.as_ref().next.set(next);
+    }
+
+    #[inline]
+    unsafe fn set_prev(&mut self, ptr: Self::LinkPtr, prev: Option<Self::LinkPtr>) {
+        ptr.as_ref().prev.set(prev);
+    }
+}
+
+impl DefaultLinkOps for Link {
+    type Ops = LinkOps;
+
+    const NEW: Self::Ops = LinkOps;
+}
+
+unsafe impl Send for Link {}
+
+impl Clone for Link {
+    #[inline]
+    fn clone(&self) -> Link {
+        Link::new()
+    }
+}
+
+impl Default for Link {
+    #[inline]
+    fn default() -> Link {
+        Link::new()
+    }
+}
+
+impl fmt::Debug for Link {
+    #[inline]
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // There isn't anything sensible to print here except whether the link
+        // is currently in a list.
+        if self.is_linked() {
+            write!(f, "linked")
+        } else {
+            write!(f, "unlinked")
+        }
+    }
+}
diff --git a/ylong_io/src/sys/unix/aio/intrusive_adapter/mod.rs b/ylong_io/src/sys/unix/aio/intrusive_adapter/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..0a97ec951d5549ae2720d2c69ae8a16a75ea496e
--- /dev/null
+++ b/ylong_io/src/sys/unix/aio/intrusive_adapter/mod.rs
@@ -0,0 +1,20 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+mod intrusive_trait;
+mod raw_mutex;
+mod link;
+mod word_lock;
+
+pub use intrusive_trait::{DefaultLinkOps, LinkOps, LinkedListOps, Adapter, PointerOps};
+pub use raw_mutex::RawMutex;
+pub use link::Link;
\ No newline at end of file
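Reviewer note: to make the single-threaded `Link` state machine concrete, a tiny sketch of the acquire/release cycle. It assumes the `LinkOps` trait (re-exported from intrusive_trait above) is in scope so the method calls resolve.

```rust
use std::ptr::NonNull;

fn demo() {
    let link = Link::new();
    assert!(!link.is_linked());

    // Acquiring marks the link as in use; releasing restores the marker.
    let mut ops = <Link as DefaultLinkOps>::NEW;
    let ptr = NonNull::from(&link);
    unsafe {
        assert!(ops.acquire_link(ptr));
        assert!(link.is_linked());
        ops.release_link(ptr);
    }
    assert!(!link.is_linked());
}
```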
diff --git a/ylong_io/src/sys/unix/aio/intrusive_adapter/raw_mutex.rs b/ylong_io/src/sys/unix/aio/intrusive_adapter/raw_mutex.rs
new file mode 100644
index 0000000000000000000000000000000000000000..229c01eaf635c8edcdfe0d70f081ebe08efb986b
--- /dev/null
+++ b/ylong_io/src/sys/unix/aio/intrusive_adapter/raw_mutex.rs
@@ -0,0 +1,716 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::ptr;
+use std::cell::{Cell, UnsafeCell};
+use std::sync::atomic::{AtomicPtr, AtomicU8, AtomicUsize, Ordering};
+use std::time::{Duration, Instant};
+use crate::sys::unix::aio::intrusive_adapter::word_lock::{DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN, ParkResult, ParkToken, SpinWait, ThreadParker, ThreadParkerT, UnparkHandleT, UnparkToken, WordLock};
+use crate::sys::unix::aio::noop_lock::RawMutex as raw_trait;
+
+static HASHTABLE: AtomicPtr<HashTable> = AtomicPtr::new(ptr::null_mut());
+static NUM_THREADS: AtomicUsize = AtomicUsize::new(0);
+
+const LOCKED_BIT: u8 = 0b01;
+const PARKED_BIT: u8 = 0b10;
+
+const LOAD_FACTOR: usize = 3;
+pub(crate) const TOKEN_NORMAL: UnparkToken = UnparkToken(0);
+pub(crate) const TOKEN_HANDOFF: UnparkToken = UnparkToken(1);
+
+/// Result of an unpark operation.
+#[non_exhaustive]
+#[derive(Copy, Clone, Default, Eq, PartialEq, Debug)]
+pub struct UnparkResult {
+    /// The number of threads that were unparked.
+    pub unparked_threads: usize,
+
+    /// The number of threads that were requeued.
+    pub requeued_threads: usize,
+
+    /// Whether there are any threads remaining in the queue. This only returns
+    /// true if a thread was unparked.
+    pub have_more_threads: bool,
+
+    /// This is set to true on average once every 0.5ms for any given key. It
+    /// should be used to switch to a fair unlocking mechanism for a particular
+    /// unlock.
+    pub be_fair: bool,
+}
+
+/// A parking-lot based raw mutex; the atomic `state` byte holds the current
+/// lock and park state of the mutex instance.
+pub struct RawMutex {
+    pub(crate) state: AtomicU8,
+}
+
+/// Marker type which prevents mutex guards from being sent between threads.
+pub struct GuardNoSend(*mut ());
+
+unsafe impl raw_trait for RawMutex {
+    #[allow(clippy::declare_interior_mutable_const)]
+    const INIT: RawMutex = RawMutex {
+        state: AtomicU8::new(0),
+    };
+    type GuardMarker = GuardNoSend;
+
+    fn lock(&self) {
+        if self
+            .state
+            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
+            .is_err()
+        {
+            self.lock_slow(None);
+        }
+    }
+
+    fn try_lock(&self) -> bool {
+        let mut state = self.state.load(Ordering::Relaxed);
+        loop {
+            if state & LOCKED_BIT != 0 {
+                return false;
+            }
+            match self.state.compare_exchange_weak(
+                state,
+                state | LOCKED_BIT,
+                Ordering::Acquire,
+                Ordering::Relaxed,
+            ) {
+                Ok(_) => {
+                    return true;
+                }
+                Err(x) => state = x,
+            }
+        }
+    }
+
+    fn unlock(&self) {
+        if self
+            .state
+            .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
+            .is_ok()
+        {
+            return;
+        }
+        self.unlock_slow(false);
+    }
+}
+
+impl RawMutex {
+    #[cold]
+    fn lock_slow(&self, timeout: Option<Instant>) -> bool {
+        let mut spinwait = SpinWait::new();
+        let mut state = self.state.load(Ordering::Relaxed);
+        loop {
+            // Grab the lock if it isn't locked, even if there is a queue on it
+            if state & LOCKED_BIT == 0 {
+                match self.state.compare_exchange_weak(
+                    state,
+                    state | LOCKED_BIT,
+                    Ordering::Acquire,
+                    Ordering::Relaxed,
+                ) {
+                    Ok(_) => return true,
+                    Err(x) => state = x,
+                }
+                continue;
+            }
+
+            // If there is no queue, try spinning a few times
+            if state & PARKED_BIT == 0 && spinwait.spin() {
+                state = self.state.load(Ordering::Relaxed);
+                continue;
+            }
+
+            // Set the parked bit
+            if state & PARKED_BIT == 0 {
+                if let Err(x) = self.state.compare_exchange_weak(
+                    state,
+                    state | PARKED_BIT,
+                    Ordering::Relaxed,
+                    Ordering::Relaxed,
+                ) {
+                    state = x;
+                    continue;
+                }
+            }
+
+            // Park our thread until we are woken up by an unlock
+            let addr = self as *const _ as usize;
+            let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT;
+            let before_sleep = || {};
+            let timed_out = |_, was_last_thread| {
+                // Clear the parked bit if we were the last parked thread
+                if was_last_thread {
+                    self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
+                }
+            };
+            // SAFETY:
+            //   * `addr` is an address we control.
+            //   * `validate`/`timed_out` does not panic or call into any function of `parking_lot`.
+            //   * `before_sleep` does not call `park`, nor does it panic.
+            match unsafe {
+                park(
+                    addr,
+                    validate,
+                    before_sleep,
+                    timed_out,
+                    DEFAULT_PARK_TOKEN,
+                    timeout,
+                )
+            } {
+                // The thread that unparked us passed the lock on to us
+                // directly without unlocking it.
+                ParkResult::Unparked(TOKEN_HANDOFF) => return true,
+
+                // We were unparked normally, try acquiring the lock again
+                ParkResult::Unparked(_) => (),
+
+                // The validation function failed, try locking again
+                ParkResult::Invalid => (),
+
+                // Timeout expired
+                ParkResult::TimedOut => return false,
+            }
+
+            // Loop back and try locking again
+            spinwait.reset();
+            state = self.state.load(Ordering::Relaxed);
+        }
+    }
+
+    #[cold]
+    fn unlock_slow(&self, force_fair: bool) {
+        // Unpark one thread and leave the parked bit set if there might
+        // still be parked threads on this address.
+        let addr = self as *const _ as usize;
+        let callback = |result: UnparkResult| {
+            // If we are using a fair unlock then we should keep the
+            // mutex locked and hand it off to the unparked thread.
+            if result.unparked_threads != 0 && (force_fair || result.be_fair) {
+                // Clear the parked bit if there are no more parked
+                // threads.
+                if !result.have_more_threads {
+                    self.state.store(LOCKED_BIT, Ordering::Relaxed);
+                }
+                return TOKEN_HANDOFF;
+            }
+
+            // Clear the locked bit, and the parked bit as well if there
+            // are no more parked threads.
+            if result.have_more_threads {
+                self.state.store(PARKED_BIT, Ordering::Release);
+            } else {
+                self.state.store(0, Ordering::Release);
+            }
+            TOKEN_NORMAL
+        };
+        // SAFETY:
+        //   * `addr` is an address we control.
+        //   * `callback` does not panic or call into any function of `parking_lot`.
+        unsafe {
+            unpark_one(addr, callback);
+        }
+    }
+}
+
+struct ThreadData {
+    parker: ThreadParker,
+
+    // Key that this thread is sleeping on. This may change if the thread is
+    // requeued to a different key.
+    key: AtomicUsize,
+
+    // Linked list of parked threads in a bucket
+    next_in_queue: Cell<*const ThreadData>,
+
+    // UnparkToken passed to this thread when it is unparked
+    unpark_token: Cell<UnparkToken>,
+
+    // ParkToken value set by the thread when it was parked
+    park_token: Cell<ParkToken>,
+
+    // Is the thread parked with a timeout?
+    parked_with_timeout: Cell<bool>,
+}
+impl ThreadData {
+    fn new() -> ThreadData {
+        // Keep track of the total number of live ThreadData objects and resize
+        // the hash table accordingly.
+        let num_threads = NUM_THREADS.fetch_add(1, Ordering::Relaxed) + 1;
+        grow_hashtable(num_threads);
+
+        ThreadData {
+            parker: ThreadParker::new(),
+            key: AtomicUsize::new(0),
+            next_in_queue: Cell::new(ptr::null()),
+            unpark_token: Cell::new(DEFAULT_UNPARK_TOKEN),
+            park_token: Cell::new(DEFAULT_PARK_TOKEN),
+            parked_with_timeout: Cell::new(false),
+        }
+    }
+}
+impl Drop for ThreadData {
+    fn drop(&mut self) {
+        NUM_THREADS.fetch_sub(1, Ordering::Relaxed);
+    }
+}
+
+/// Parks the current thread on `key` until unparked or timed out.
+///
+/// # Safety
+/// The callbacks must not panic or reenter this module, as in `parking_lot`.
+pub unsafe fn park(
+    key: usize,
+    validate: impl FnOnce() -> bool,
+    before_sleep: impl FnOnce(),
+    timed_out: impl FnOnce(usize, bool),
+    park_token: ParkToken,
+    timeout: Option<Instant>,
+) -> ParkResult {
+    // Grab our thread data, this also ensures that the hash table exists
+    with_thread_data(|thread_data| {
+        // Lock the bucket for the given key
+        let bucket = lock_bucket(key);
+
+        // If the validation function fails, just return
+        if !validate() {
+            // SAFETY: We hold the lock here, as required
+            bucket.mutex.unlock();
+            return ParkResult::Invalid;
+        }
+
+        // Append our thread data to the queue and unlock the bucket
+        thread_data.parked_with_timeout.set(timeout.is_some());
+        thread_data.next_in_queue.set(ptr::null());
+        thread_data.key.store(key, Ordering::Relaxed);
+        thread_data.park_token.set(park_token);
+        thread_data.parker.prepare_park();
+        if !bucket.queue_head.get().is_null() {
+            (*bucket.queue_tail.get()).next_in_queue.set(thread_data);
+        } else {
+            bucket.queue_head.set(thread_data);
+        }
+        bucket.queue_tail.set(thread_data);
+        // SAFETY: We hold the lock here, as required
+        bucket.mutex.unlock();
+
+        // Invoke the pre-sleep callback
+        before_sleep();
+
+        // Park our thread and determine whether we were woken up by an unpark
+        // or by our timeout. Note that this isn't precise: we can still be
+        // unparked since we are still in the queue.
+        let unparked = match timeout {
+            Some(timeout) => thread_data.parker.park_until(timeout),
+            None => {
+                thread_data.parker.park();
+                // call deadlock detection on_unpark hook
+                // deadlock::on_unpark(thread_data);
+                true
+            }
+        };
+
+        // If we were unparked, return now
+        if unparked {
+            return ParkResult::Unparked(thread_data.unpark_token.get());
+        }
+
+        // Lock our bucket again. Note that the hashtable may have been rehashed in
+        // the meantime. Our key may also have changed if we were requeued.
+        let (key, bucket) = lock_bucket_checked(&thread_data.key);
+
+        // Now we need to check again if we were unparked or timed out. Unlike the
+        // last check this is precise because we hold the bucket lock.
+        if !thread_data.parker.timed_out() {
+            // SAFETY: We hold the lock here, as required
+            bucket.mutex.unlock();
+            return ParkResult::Unparked(thread_data.unpark_token.get());
+        }
+
+        // We timed out, so we now need to remove our thread from the queue
+        let mut link = &bucket.queue_head;
+        let mut current = bucket.queue_head.get();
+        let mut previous = ptr::null();
+        let mut was_last_thread = true;
+        while !current.is_null() {
+            if current == thread_data {
+                let next = (*current).next_in_queue.get();
+                link.set(next);
+                if bucket.queue_tail.get() == current {
+                    bucket.queue_tail.set(previous);
+                } else {
+                    // Scan the rest of the queue to see if there are any other
+                    // entries with the given key.
+                    let mut scan = next;
+                    while !scan.is_null() {
+                        if (*scan).key.load(Ordering::Relaxed) == key {
+                            was_last_thread = false;
+                            break;
+                        }
+                        scan = (*scan).next_in_queue.get();
+                    }
+                }
+
+                // Callback to indicate that we timed out, and whether we were the
+                // last thread on the queue.
+                timed_out(key, was_last_thread);
+                break;
+            } else {
+                if (*current).key.load(Ordering::Relaxed) == key {
+                    was_last_thread = false;
+                }
+                link = &(*current).next_in_queue;
+                previous = current;
+                current = link.get();
+            }
+        }
+
+        // There should be no way for our thread to have been removed from the queue
+        // if we timed out.
+        debug_assert!(!current.is_null());
+
+        // Unlock the bucket, we are done
+        // SAFETY: We hold the lock here, as required
+        bucket.mutex.unlock();
+        ParkResult::TimedOut
+    })
+}
+
+struct HashTable {
+    // Hash buckets for the table
+    entries: Box<[Bucket]>,
+
+    // Number of bits used for the hash function
+    hash_bits: u32,
+
+    // Previous table. This is only kept to keep leak detectors happy.
+    _prev: *const HashTable,
+}
+
+impl HashTable {
+    #[inline]
+    fn new(num_threads: usize, prev: *const HashTable) -> Box<HashTable> {
+        let new_size = (num_threads * LOAD_FACTOR).next_power_of_two();
+        let hash_bits = 0usize.leading_zeros() - new_size.leading_zeros() - 1;
+
+        let now = Instant::now();
+        let mut entries = Vec::with_capacity(new_size);
+        for i in 0..new_size {
+            // We must ensure the seed is not zero
+            entries.push(Bucket::new(now, i as u32 + 1));
+        }
+
+        Box::new(HashTable {
+            entries: entries.into_boxed_slice(),
+            hash_bits,
+            _prev: prev,
+        })
+    }
+}
+struct FairTimeout {
+    // Next time at which point be_fair should be set
+    timeout: Instant,
+
+    // the PRNG state for calculating the next timeout
+    seed: u32,
+}
+impl FairTimeout {
+    #[inline]
+    fn new(timeout: Instant, seed: u32) -> FairTimeout {
+        FairTimeout { timeout, seed }
+    }
+
+    // Determine whether we should force a fair unlock, and update the timeout
+    #[inline]
+    fn should_timeout(&mut self) -> bool {
+        let now = Instant::now();
+        if now > self.timeout {
+            // Time between 0 and 1ms.
+            let nanos = self.gen_u32() % 1_000_000;
+            self.timeout = now + Duration::new(0, nanos);
+            true
+        } else {
+            false
+        }
+    }
+
+    // Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia.
+    fn gen_u32(&mut self) -> u32 {
+        self.seed ^= self.seed << 13;
+        self.seed ^= self.seed >> 17;
+        self.seed ^= self.seed << 5;
+        self.seed
+    }
+}
+#[repr(align(64))]
+struct Bucket {
+    // Lock protecting the queue
+    mutex: WordLock,
+
+    // Linked list of threads waiting on this bucket
+    queue_head: Cell<*const ThreadData>,
+    queue_tail: Cell<*const ThreadData>,
+
+    // Next time at which point be_fair should be set
+    fair_timeout: UnsafeCell<FairTimeout>,
+}
+impl Bucket {
+    #[inline]
+    pub fn new(timeout: Instant, seed: u32) -> Self {
+        Self {
+            mutex: WordLock::new(),
+            queue_head: Cell::new(ptr::null()),
+            queue_tail: Cell::new(ptr::null()),
+            fair_timeout: UnsafeCell::new(FairTimeout::new(timeout, seed)),
+        }
+    }
+}
+
+fn lock_bucket(key: usize) -> &'static Bucket {
+    loop {
+        let hashtable = get_hashtable();
+
+        let hash = hash(key, hashtable.hash_bits);
+        let bucket = &hashtable.entries[hash];
+
+        // Lock the bucket
+        bucket.mutex.lock();
+
+        // If no other thread has rehashed the table before we grabbed the lock
+        // then we are good to go! The lock we grabbed prevents any rehashes.
+        if HASHTABLE.load(Ordering::Relaxed) == hashtable as *const _ as *mut _ {
+            return bucket;
+        }
+
+        // Unlock the bucket and try again
+        // SAFETY: We hold the lock here, as required
+        unsafe { bucket.mutex.unlock() };
+    }
+}
+
+/// Unparks one thread waiting on `key`, invoking `callback` while the
+/// bucket is locked.
+///
+/// # Safety
+/// `callback` must not panic or reenter this module, as in `parking_lot`.
+#[inline]
+pub unsafe fn unpark_one(
+    key: usize,
+    callback: impl FnOnce(UnparkResult) -> UnparkToken,
+) -> UnparkResult {
+    // Lock the bucket for the given key
+    let bucket = lock_bucket(key);
+
+    // Find a thread with a matching key and remove it from the queue
+    let mut link = &bucket.queue_head;
+    let mut current = bucket.queue_head.get();
+    let mut previous = ptr::null();
+    let mut result = UnparkResult::default();
+    while !current.is_null() {
+        if (*current).key.load(Ordering::Relaxed) == key {
+            // Remove the thread from the queue
+            let next = (*current).next_in_queue.get();
+            link.set(next);
+            if bucket.queue_tail.get() == current {
+                bucket.queue_tail.set(previous);
+            } else {
+                // Scan the rest of the queue to see if there are any other
+                // entries with the given key.
+                let mut scan = next;
+                while !scan.is_null() {
+                    if (*scan).key.load(Ordering::Relaxed) == key {
+                        result.have_more_threads = true;
+                        break;
+                    }
+                    scan = (*scan).next_in_queue.get();
+                }
+            }
+
+            // Invoke the callback before waking up the thread
+            result.unparked_threads = 1;
+            result.be_fair = (*bucket.fair_timeout.get()).should_timeout();
+            let token = callback(result);
+
+            // Set the token for the target thread
+            (*current).unpark_token.set(token);
+
+            // This is a bit tricky: we first lock the ThreadParker to prevent
+            // the thread from exiting and freeing its ThreadData if its wait
+            // times out. Then we unlock the queue since we don't want to keep
+            // the queue locked while we perform a system call. Finally we wake
+            // up the parked thread.
+            let handle = (*current).parker.unpark_lock();
+            // SAFETY: We hold the lock here, as required
+            bucket.mutex.unlock();
+            handle.unpark();
+
+            return result;
+        } else {
+            link = &(*current).next_in_queue;
+            previous = current;
+            current = link.get();
+        }
+    }
+
+    // No threads with a matching key were found in the bucket
+    callback(result);
+    // SAFETY: We hold the lock here, as required
+    bucket.mutex.unlock();
+    result
+}
+
+#[cold]
+fn create_hashtable() -> &'static HashTable {
+    let new_table = Box::into_raw(HashTable::new(LOAD_FACTOR, ptr::null()));
+
+    // If this fails then it means some other thread created the hash table first.
+ let table = match HASHTABLE.compare_exchange( + ptr::null_mut(), + new_table, + Ordering::AcqRel, + Ordering::Acquire, + ) { + Ok(_) => new_table, + Err(old_table) => { + // Free the table we created + // SAFETY: `new_table` is created from `Box::into_raw` above and only freed here. + unsafe { + let _ = Box::from_raw(new_table); + } + old_table + } + }; + // SAFETY: The `HashTable` behind `table` is never freed. It is either the table pointer we + // created here, or it is one loaded from `HASHTABLE`. + unsafe { &*table } +} + +#[inline] +fn get_hashtable() -> &'static HashTable { + let table = HASHTABLE.load(Ordering::Acquire); + + // If there is no table, create one + if table.is_null() { + create_hashtable() + } else { + // SAFETY: when not null, `HASHTABLE` always points to a `HashTable` that is never freed. + unsafe { &*table } + } +} +fn grow_hashtable(num_threads: usize) { + // Lock all buckets in the existing table and get a reference to it + let old_table = loop { + let table = get_hashtable(); + + // Check if we need to resize the existing table + if table.entries.len() >= LOAD_FACTOR * num_threads { + return; + } + + // Lock all buckets in the old table + for bucket in &table.entries[..] { + bucket.mutex.lock(); + } + + // Now check if our table is still the latest one. Another thread could + // have grown the hash table between us reading HASHTABLE and locking + // the buckets. + if HASHTABLE.load(Ordering::Relaxed) == table as *const _ as *mut _ { + break table; + } + + // Unlock buckets and try again + for bucket in &table.entries[..] { + // SAFETY: We hold the lock here, as required + unsafe { bucket.mutex.unlock() }; + } + }; + + // Create the new table + let mut new_table = HashTable::new(num_threads, old_table); + + // Move the entries from the old table to the new one + for bucket in &old_table.entries[..] { + // SAFETY: The park, unpark* and check_wait_graph_fast functions create only correct linked + // lists. All `ThreadData` instances in these lists will remain valid as long as they are + // present in the lists, meaning as long as their threads are parked. + unsafe { rehash_bucket_into(bucket, &mut new_table) }; + } + + // Publish the new table. No races are possible at this point because + // any other thread trying to grow the hash table is blocked on the bucket + // locks in the old table. + HASHTABLE.store(Box::into_raw(new_table), Ordering::Release); + + // Unlock all buckets in the old table + for bucket in &old_table.entries[..] { + // SAFETY: We hold the lock here, as required + unsafe { bucket.mutex.unlock() }; + } +} + +#[inline] +fn lock_bucket_checked(key: &AtomicUsize) -> (usize, &'static Bucket) { + loop { + let hashtable = get_hashtable(); + let current_key = key.load(Ordering::Relaxed); + + let hash = hash(current_key, hashtable.hash_bits); + let bucket = &hashtable.entries[hash]; + + // Lock the bucket + bucket.mutex.lock(); + + // Check that both the hash table and key are correct while the bucket + // is locked. Note that the key can't change once we locked the proper + // bucket for it, so we just keep trying until we have the correct key. 
+ if HASHTABLE.load(Ordering::Relaxed) == hashtable as *const _ as *mut _ + && key.load(Ordering::Relaxed) == current_key + { + return (current_key, bucket); + } + + // Unlock the bucket and try again + // SAFETY: We hold the lock here, as required + unsafe { bucket.mutex.unlock() }; + } +} + +#[cfg(target_pointer_width = "32")] +#[inline] +fn hash(key: usize, bits: u32) -> usize { + key.wrapping_mul(0x9E3779B9) >> (32 - bits) +} +#[cfg(target_pointer_width = "64")] +#[inline] +fn hash(key: usize, bits: u32) -> usize { + key.wrapping_mul(0x9E3779B97F4A7C15) >> (64 - bits) +} +#[inline(always)] +fn with_thread_data(f: impl FnOnce(&ThreadData) -> T) -> T { + // Unlike word_lock::ThreadData, parking_lot::ThreadData is always expensive + // to construct. Try to use a thread-local version if possible. Otherwise just + // create a ThreadData on the stack + let mut thread_data_storage = None; + thread_local!(static THREAD_DATA: ThreadData = ThreadData::new()); + let thread_data_ptr = THREAD_DATA + .try_with(|x| x as *const ThreadData) + .unwrap_or_else(|_| thread_data_storage.get_or_insert_with(ThreadData::new)); + + f(unsafe { &*thread_data_ptr }) +} + +unsafe fn rehash_bucket_into(bucket: &'static Bucket, table: &mut HashTable) { + let mut current: *const ThreadData = bucket.queue_head.get(); + while !current.is_null() { + let next = (*current).next_in_queue.get(); + let hash = hash((*current).key.load(Ordering::Relaxed), table.hash_bits); + if table.entries[hash].queue_tail.get().is_null() { + table.entries[hash].queue_head.set(current); + } else { + (*table.entries[hash].queue_tail.get()) + .next_in_queue + .set(current); + } + table.entries[hash].queue_tail.set(current); + (*current).next_in_queue.set(ptr::null()); + current = next; + } +} \ No newline at end of file diff --git a/ylong_io/src/sys/unix/aio/intrusive_adapter/word_lock.rs b/ylong_io/src/sys/unix/aio/intrusive_adapter/word_lock.rs new file mode 100644 index 0000000000000000000000000000000000000000..739d90c7b91f79d7b17940ef238ac2a59139ec4d --- /dev/null +++ b/ylong_io/src/sys/unix/aio/intrusive_adapter/word_lock.rs @@ -0,0 +1,557 @@ +// Copyright (c) 2023 Huawei Device Co., Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
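+//! A queue-based lock whose entire state lives in one word (mirroring the
+//! `parking_lot` word lock): the low bits hold `LOCKED_BIT` and
+//! `QUEUE_LOCKED_BIT`, and the remaining bits store a pointer to the head
+//! of the waiter queue, which is why `ThreadData::new` asserts an alignment
+//! larger than `!QUEUE_MASK`.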
+ +use std::{hint, ptr, thread}; +use std::cell::{Cell}; +use std::sync::atomic::{AtomicI32, AtomicUsize, fence, Ordering}; +use std::time::Instant; +use std::mem::align_of; + + +const LOCKED_BIT: usize = 1; +const QUEUE_LOCKED_BIT: usize = 2; +const QUEUE_MASK: usize = !3; +pub const DEFAULT_PARK_TOKEN: ParkToken = ParkToken(0); + +pub const DEFAULT_UNPARK_TOKEN: UnparkToken = UnparkToken(0); + + + +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +pub struct ParkToken(pub usize); + +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +pub struct UnparkToken(pub usize); + + +pub struct UnparkHandle { + futex: *const AtomicI32, +} +impl UnparkHandleT for UnparkHandle { + #[inline] + unsafe fn unpark(self) { + // The thread data may have been freed at this point, but it doesn't + // matter since the syscall will just return EFAULT in that case. + let r = libc::syscall( + libc::SYS_futex, + self.futex, + libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG, + 1, + ); + debug_assert!(r == 0 || r == 1 || r == -1); + if r == -1 { + debug_assert_eq!(errno(), libc::EFAULT); + } + } +} + +#[derive(Default)] +pub struct SpinWait { + counter: u32, +} +impl SpinWait { + #[inline] + pub fn new() -> Self { + Self::default() + } + + #[inline] + pub fn reset(&mut self) { + self.counter = 0; + } + + #[inline] + pub fn spin(&mut self) -> bool { + if self.counter >= 10 { + return false; + } + self.counter += 1; + if self.counter <= 3 { + cpu_relax(1 << self.counter); + } else { + thread_yield(); + } + true + } +} +pub struct WordLock { + state: AtomicUsize, +} +impl WordLock { + /// Returns a new, unlocked, WordLock. + pub const fn new() -> Self { + WordLock { + state: AtomicUsize::new(0), + } + } + + #[inline] + pub fn lock(&self) { + if self + .state + .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed) + .is_ok() + { + return; + } + self.lock_slow(); + } + + /// Must not be called on an already unlocked `WordLock`! + #[inline] + pub unsafe fn unlock(&self) { + let state = self.state.fetch_sub(LOCKED_BIT, Ordering::Release); + if state.is_queue_locked() || state.queue_head().is_null() { + return; + } + self.unlock_slow(); + } + + #[cold] + fn lock_slow(&self) { + let mut spinwait = SpinWait::new(); + let mut state = self.state.load(Ordering::Relaxed); + loop { + // Grab the lock if it isn't locked, even if there is a queue on it + if !state.is_locked() { + match self.state.compare_exchange_weak( + state, + state | LOCKED_BIT, + Ordering::Acquire, + Ordering::Relaxed, + ) { + Ok(_) => return, + Err(x) => state = x, + } + continue; + } + + // If there is no queue, try spinning a few times + if state.queue_head().is_null() && spinwait.spin() { + state = self.state.load(Ordering::Relaxed); + continue; + } + + // Get our thread data and prepare it for parking + state = with_thread_data(|thread_data| { + // The pthread implementation is still unsafe, so we need to surround `prepare_park` + // with `unsafe {}`. 
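+                // prepare_park arms the futex (sets it to 1) before this
+                // thread is published on the queue, so an unpark that lands
+                // between the enqueue CAS below and park() is not lost:
+                // park() only sleeps while the futex is still non-zero.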
+ #[allow(unused_unsafe)] + unsafe { + thread_data.parker.prepare_park(); + } + + // Add our thread to the front of the queue + let queue_head = state.queue_head(); + if queue_head.is_null() { + thread_data.queue_tail.set(thread_data); + thread_data.prev.set(ptr::null()); + } else { + thread_data.queue_tail.set(ptr::null()); + thread_data.prev.set(ptr::null()); + thread_data.next.set(queue_head); + } + if let Err(x) = self.state.compare_exchange_weak( + state, + state.with_queue_head(thread_data), + Ordering::Release, + Ordering::Relaxed, + ) { + return x; + } + + // Sleep until we are woken up by an unlock + // Ignoring unused unsafe, since it's only a few platforms where this is unsafe. + #[allow(unused_unsafe)] + unsafe { + thread_data.parker.park(); + } + + // Loop back and try locking again + spinwait.reset(); + self.state.load(Ordering::Relaxed) + }); + } + } + + #[cold] + fn unlock_slow(&self) { + let mut state = self.state.load(Ordering::Relaxed); + loop { + // We just unlocked the WordLock. Just check if there is a thread + // to wake up. If the queue is locked then another thread is already + // taking care of waking up a thread. + if state.is_queue_locked() || state.queue_head().is_null() { + return; + } + + // Try to grab the queue lock + match self.state.compare_exchange_weak( + state, + state | QUEUE_LOCKED_BIT, + Ordering::Acquire, + Ordering::Relaxed, + ) { + Ok(_) => break, + Err(x) => state = x, + } + } + + // Now we have the queue lock and the queue is non-empty + 'outer: loop { + // First, we need to fill in the prev pointers for any newly added + // threads. We do this until we reach a node that we previously + // processed, which has a non-null queue_tail pointer. + let queue_head = state.queue_head(); + let mut queue_tail; + let mut current = queue_head; + loop { + queue_tail = unsafe { (*current).queue_tail.get() }; + if !queue_tail.is_null() { + break; + } + unsafe { + let next = (*current).next.get(); + (*next).prev.set(current); + current = next; + } + } + + // Set queue_tail on the queue head to indicate that the whole list + // has prev pointers set correctly. + unsafe { + (*queue_head).queue_tail.set(queue_tail); + } + + // If the WordLock is locked, then there is no point waking up a + // thread now. Instead we let the next unlocker take care of waking + // up a thread. + if state.is_locked() { + match self.state.compare_exchange_weak( + state, + state & !QUEUE_LOCKED_BIT, + Ordering::Release, + Ordering::Relaxed, + ) { + Ok(_) => return, + Err(x) => state = x, + } + + // Need an acquire fence before reading the new queue + fence(Ordering::Acquire); + continue; + } + + // Remove the last thread from the queue and unlock the queue + let new_tail = unsafe { (*queue_tail).prev.get() }; + if new_tail.is_null() { + loop { + match self.state.compare_exchange_weak( + state, + state & LOCKED_BIT, + Ordering::Release, + Ordering::Relaxed, + ) { + Ok(_) => break, + Err(x) => state = x, + } + + // If the compare_exchange failed because a new thread was + // added to the queue then we need to re-scan the queue to + // find the previous element. + if state.queue_head().is_null() { + continue; + } else { + // Need an acquire fence before reading the new queue + fence(Ordering::Acquire); + continue 'outer; + } + } + } else { + unsafe { + (*queue_head).queue_tail.set(new_tail); + } + self.state.fetch_and(!QUEUE_LOCKED_BIT, Ordering::Release); + } + + // Finally, wake up the thread we removed from the queue. 
Note that + // we don't need to worry about any races here since the thread is + // guaranteed to be sleeping right now and we are the only one who + // can wake it up. + unsafe { + (*queue_tail).parker.unpark_lock().unpark(); + } + break; + } + } +} + + + + +struct ThreadData { + parker: ThreadParker, + + // Linked list of threads in the queue. The queue is split into two parts: + // the processed part and the unprocessed part. When new nodes are added to + // the list, they only have the next pointer set, and queue_tail is null. + // + // Nodes are processed with the queue lock held, which consists of setting + // the prev pointer for each node and setting the queue_tail pointer on the + // first processed node of the list. + // + // This setup allows nodes to be added to the queue without a lock, while + // still allowing O(1) removal of nodes from the processed part of the list. + // The only cost is the O(n) processing, but this only needs to be done + // once for each node, and therefore isn't too expensive. + queue_tail: Cell<*const ThreadData>, + prev: Cell<*const ThreadData>, + next: Cell<*const ThreadData>, +} + +impl ThreadData { + #[inline] + fn new() -> ThreadData { + assert!(align_of::() > !QUEUE_MASK); + ThreadData { + parker: ThreadParker::new(), + queue_tail: Cell::new(ptr::null()), + prev: Cell::new(ptr::null()), + next: Cell::new(ptr::null()), + } + } +} + +pub struct ThreadParker { + futex: AtomicI32, +} +impl ThreadParkerT for ThreadParker { + type UnparkHandle = UnparkHandle; + + const IS_CHEAP_TO_CONSTRUCT: bool = true; + + #[inline] + fn new() -> ThreadParker { + ThreadParker { + futex: AtomicI32::new(0), + } + } + + #[inline] + unsafe fn prepare_park(&self) { + self.futex.store(1, Ordering::Relaxed); + } + + #[inline] + unsafe fn timed_out(&self) -> bool { + self.futex.load(Ordering::Relaxed) != 0 + } + + #[inline] + unsafe fn park(&self) { + while self.futex.load(Ordering::Acquire) != 0 { + self.futex_wait(None); + } + } + + #[inline] + unsafe fn park_until(&self, timeout: Instant) -> bool { + while self.futex.load(Ordering::Acquire) != 0 { + let now = Instant::now(); + if timeout <= now { + return false; + } + let diff = timeout - now; + if diff.as_secs() as libc::time_t as u64 != diff.as_secs() { + // Timeout overflowed, just sleep indefinitely + self.park(); + return true; + } + // SAFETY: libc::timespec is zero initializable. + let mut ts: libc::timespec = std::mem::zeroed(); + ts.tv_sec = diff.as_secs() as libc::time_t; + ts.tv_nsec = diff.subsec_nanos() as libc::c_long; + self.futex_wait(Some(ts)); + } + true + } + + // Locks the parker to prevent the target thread from exiting. This is + // necessary to ensure that thread-local ThreadData objects remain valid. + // This should be called while holding the queue lock. 
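+    // For the futex parker there is nothing to actually lock: storing 0 both
+    // marks the thread as unparked and keeps the later FUTEX_WAKE harmless
+    // if it races with the target thread exiting (the syscall then reports
+    // EFAULT, which unpark() deliberately tolerates).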
+ #[inline] + unsafe fn unpark_lock(&self) -> UnparkHandle { + // We don't need to lock anything, just clear the state + self.futex.store(0, Ordering::Release); + + UnparkHandle { futex: &self.futex } + } +} +impl ThreadParker { + #[inline] + fn futex_wait(&self, ts: Option) { + let ts_ptr = ts + .as_ref() + .map(|ts_ref| ts_ref as *const _) + .unwrap_or(ptr::null()); + let r = unsafe { + libc::syscall( + libc::SYS_futex, + &self.futex, + libc::FUTEX_WAIT | libc::FUTEX_PRIVATE_FLAG, + 1, + ts_ptr, + ) + }; + debug_assert!(r == 0 || r == -1); + if r == -1 { + debug_assert!( + errno() == libc::EINTR + || errno() == libc::EAGAIN + || (ts.is_some() && errno() == libc::ETIMEDOUT) + ); + } + } +} + +trait LockState { + fn is_locked(&self) -> bool; + fn is_queue_locked(&self) -> bool; + fn queue_head(&self) -> *const ThreadData; + fn with_queue_head(&self, thread_data: *const ThreadData) -> Self; +} + +impl LockState for usize { + #[inline] + fn is_locked(&self) -> bool { + self & LOCKED_BIT != 0 + } + + #[inline] + fn is_queue_locked(&self) -> bool { + self & QUEUE_LOCKED_BIT != 0 + } + + #[inline] + fn queue_head(&self) -> *const ThreadData { + (self & QUEUE_MASK) as *const ThreadData + } + + #[inline] + fn with_queue_head(&self, thread_data: *const ThreadData) -> Self { + (self & !QUEUE_MASK) | thread_data as *const _ as usize + } +} + +pub trait UnparkHandleT { + /// Wakes up the parked thread. This should be called after the queue lock is + /// released to avoid blocking the queue for too long. + /// + /// This method is unsafe for the same reason as the unsafe methods in + /// `ThreadParkerT`. + unsafe fn unpark(self); +} + +pub trait ThreadParkerT { + type UnparkHandle: UnparkHandleT; + + const IS_CHEAP_TO_CONSTRUCT: bool; + + fn new() -> Self; + + /// Prepares the parker. This should be called before adding it to the queue. + unsafe fn prepare_park(&self); + + /// Checks if the park timed out. This should be called while holding the + /// queue lock after park_until has returned false. + unsafe fn timed_out(&self) -> bool; + + /// Parks the thread until it is unparked. This should be called after it has + /// been added to the queue, after unlocking the queue. + unsafe fn park(&self); + + /// Parks the thread until it is unparked or the timeout is reached. This + /// should be called after it has been added to the queue, after unlocking + /// the queue. Returns true if we were unparked and false if we timed out. + unsafe fn park_until(&self, timeout: Instant) -> bool; + + /// Locks the parker to prevent the target thread from exiting. This is + /// necessary to ensure that thread-local ThreadData objects remain valid. + /// This should be called while holding the queue lock. + unsafe fn unpark_lock(&self) -> Self::UnparkHandle; +} + +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +pub enum ParkResult { + /// We were unparked by another thread with the given token. + Unparked(UnparkToken), + + /// The validation callback returned false. + Invalid, + + /// The timeout expired. 
+    TimedOut,
+}
+
+fn errno() -> libc::c_int {
+    #[cfg(target_os = "linux")]
+    unsafe {
+        *libc::__errno_location()
+    }
+    #[cfg(target_os = "android")]
+    unsafe {
+        *libc::__errno()
+    }
+}
+
+
+#[inline]
+fn cpu_relax(iterations: u32) {
+    for _ in 0..iterations {
+        hint::spin_loop()
+    }
+}
+
+#[inline]
+pub fn thread_yield() {
+    thread::yield_now();
+}
+
+#[inline]
+fn with_thread_data<T>(f: impl FnOnce(&ThreadData) -> T) -> T {
+    let mut thread_data_ptr = ptr::null();
+    // If ThreadData is expensive to construct, then we want to use a cached
+    // version in thread-local storage if possible.
+    if !ThreadParker::IS_CHEAP_TO_CONSTRUCT {
+        thread_local!(static THREAD_DATA: ThreadData = ThreadData::new());
+        if let Ok(tls_thread_data) = THREAD_DATA.try_with(|x| x as *const ThreadData) {
+            thread_data_ptr = tls_thread_data;
+        }
+    }
+    // Otherwise just create a ThreadData on the stack
+    let mut thread_data_storage = None;
+    if thread_data_ptr.is_null() {
+        thread_data_ptr = thread_data_storage.get_or_insert_with(ThreadData::new);
+    }
+
+    f(unsafe { &*thread_data_ptr })
+}
+
+
+
+
+
+
+
+
diff --git a/ylong_io/src/sys/unix/aio/lockedbuf/locked_buf.rs b/ylong_io/src/sys/unix/aio/lockedbuf/locked_buf.rs
new file mode 100644
index 0000000000000000000000000000000000000000..242a3920e4e80735ee9df4db4bec27693bc4307a
--- /dev/null
+++ b/ylong_io/src/sys/unix/aio/lockedbuf/locked_buf.rs
@@ -0,0 +1,117 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#![allow(clippy::all)]
+use std::cell::UnsafeCell;
+use std::{fmt, io};
+use std::fmt::Formatter;
+use std::mem::ManuallyDrop;
+use std::sync::Arc;
+use crate::sys::unix::aio::lockedbuf::mmapmut::MmapMut;
+use crate::sys::unix::aio::lockedbuf::scoped_lock;
+use crate::sys::unix::aio::lockedbuf::scoped_lock::ScopedLock;
+
+#[derive(Debug)]
+pub enum LockedBufError {
+    MapAnon(io::Error),
+    MemLock(scoped_lock::Error),
+}
+struct LockedBufInner {
+    bytes: ManuallyDrop<MmapMut>,
+    scoped_lock: ManuallyDrop<ScopedLock>,
+}
+
+/// Buffer with fixed capacity, locked to RAM. It prevents
+/// memory from being paged to the swap area.
+///
+/// This is required to work with AIO operations.
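+///
+/// # Example
+///
+/// A minimal usage sketch; the import path assumes the crate-root re-export
+/// in `lib.rs`, and `with_capacity` can fail at runtime if `RLIMIT_MEMLOCK`
+/// is too small for the requested size.
+///
+/// ```no_run
+/// use ylong_io::LockedBuf;
+///
+/// // 4 KiB of page-aligned memory, locked to RAM with mlock(2).
+/// let mut buf = LockedBuf::with_capacity(4096).unwrap();
+/// buf.as_mut()[0] = 0x42;
+/// assert_eq!(buf.size(), 4096);
+/// ```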
+pub struct LockedBuf {
+    inner: Arc<UnsafeCell<LockedBufInner>>
+}
+
+impl fmt::Display for LockedBuf {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        f.debug_struct("LockedBuf").field("size", &self.size()).finish()
+    }
+}
+
+/// LockedBuf lifetime-extender
+pub struct LifetimeExtender {
+    _inner: Arc<UnsafeCell<LockedBufInner>>,
+}
+
+impl LockedBuf {
+    /// Creates a buffer with the desired capacity
+    pub fn with_capacity(capacity: usize) -> Result<LockedBuf, LockedBufError> {
+        let bytes = match MmapMut::map_anon(capacity) {
+            Ok(bytes) => bytes,
+            Err(e) => return Err(LockedBufError::MapAnon(e)),
+        };
+        let scoped_lock = match scoped_lock::lock(bytes.as_ref().as_ptr(), capacity) {
+            Ok(item) => item,
+            Err(e) => return Err(LockedBufError::MemLock(e)),
+        };
+        Ok(LockedBuf {
+            inner: Arc::new(UnsafeCell::new(LockedBufInner {
+                bytes: ManuallyDrop::new(bytes),
+                scoped_lock: ManuallyDrop::new(scoped_lock),
+            })),
+        })
+    }
+    /// Returns the current capacity
+    pub fn size(&self) -> usize {
+        unsafe { &*self.inner.get() }.bytes.len()
+    }
+
+    pub(crate) fn aio_addr_and_len(&self) -> (u64, u64) {
+        let len = self.size() as u64;
+        let ptr = unsafe { (*self.inner.get()).bytes.as_ptr() as usize } as u64;
+        (ptr, len)
+    }
+
+    /// Handle which prevents the `LockedBuf` from being dropped while a request is in flight
+    pub(crate) fn lifetime_extender(&self) -> LifetimeExtender {
+        LifetimeExtender {
+            _inner: self.inner.clone(),
+        }
+    }
+}
+
+impl AsRef<[u8]> for LockedBuf {
+    fn as_ref(&self) -> &[u8] {
+        let inner = unsafe { &*self.inner.get() };
+        inner.bytes.as_ref()
+    }
+}
+
+impl AsMut<[u8]> for LockedBuf {
+    fn as_mut(&mut self) -> &mut [u8] {
+        let inner = unsafe { &mut *self.inner.get() };
+        inner.bytes.as_mut()
+    }
+}
+
+impl Drop for LockedBufInner {
+    fn drop(&mut self) {
+        unsafe {
+            ManuallyDrop::drop(&mut self.scoped_lock);
+            ManuallyDrop::drop(&mut self.bytes);
+        }
+    }
+}
+
+unsafe impl Send for LockedBuf {}
+unsafe impl Sync for LockedBuf {}
+unsafe impl Send for LifetimeExtender {}
\ No newline at end of file
diff --git a/ylong_io/src/sys/unix/aio/lockedbuf/mmapmut.rs b/ylong_io/src/sys/unix/aio/lockedbuf/mmapmut.rs
new file mode 100644
index 0000000000000000000000000000000000000000..3f82c462a0167b7a5c08e35650700abf4f5f46e1
--- /dev/null
+++ b/ylong_io/src/sys/unix/aio/lockedbuf/mmapmut.rs
@@ -0,0 +1,163 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
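+// This module mirrors the anonymous-mapping path of the `memmap` crate:
+// `MmapMut::map_anon` creates a shared, read-write, page-aligned mapping,
+// and `MmapInner`'s `Drop` rounds the pointer back to its page boundary
+// before handing the padded range to munmap(2).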
+
+use std::{io, ptr, slice};
+use std::ops::{Deref, DerefMut};
+use std::os::fd::RawFd;
+use crate::sys::unix::aio::lockedbuf::page_size;
+
+
+const MAP_STACK: libc::c_int = libc::MAP_STACK;
+
+#[derive(Default)]
+struct MmapOptions {
+    // offset: u64,
+    len: Option<usize>,
+    stack: bool,
+}
+
+impl MmapOptions {
+    fn new() -> MmapOptions {
+        MmapOptions::default()
+    }
+
+    fn len(&mut self, len: usize) -> &mut Self {
+        self.len = Some(len);
+        self
+    }
+
+    fn map_anon(&self) -> io::Result<MmapMut> {
+        MmapInner::map_anon(self.len.unwrap_or(0), self.stack)
+            .map(|inner| MmapMut { inner })
+    }
+}
+
+pub struct MmapMut {
+    inner: MmapInner
+}
+impl MmapMut {
+    pub fn map_anon(length: usize) -> io::Result<MmapMut> {
+        MmapOptions::new().len(length).map_anon()
+    }
+}
+impl Deref for MmapMut {
+    type Target = [u8];
+
+    #[inline]
+    fn deref(&self) -> &[u8] {
+        unsafe { slice::from_raw_parts(self.inner.ptr(), self.inner.len()) }
+    }
+}
+impl DerefMut for MmapMut {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut [u8] {
+        unsafe { slice::from_raw_parts_mut(self.inner.mut_ptr(), self.inner.len()) }
+    }
+}
+impl AsRef<[u8]> for MmapMut {
+    #[inline]
+    fn as_ref(&self) -> &[u8] {
+        self.deref()
+    }
+}
+
+impl AsMut<[u8]> for MmapMut {
+    #[inline]
+    fn as_mut(&mut self) -> &mut [u8] {
+        self.deref_mut()
+    }
+}
+
+struct MmapInner {
+    ptr: *mut libc::c_void,
+    len: usize,
+}
+
+impl MmapInner {
+    fn new(
+        len: usize,
+        prot: libc::c_int,
+        flags: libc::c_int,
+        file: RawFd,
+        offset: u64,
+    ) -> io::Result<MmapInner> {
+        // How far `offset` sits past the start of its page; mmap must be
+        // given a page-aligned offset and a length padded by the same amount.
+        let alignment = offset % page_size() as u64;
+        let aligned_offset = offset - alignment;
+        let aligned_len = len + alignment as usize;
+        if aligned_len == 0 {
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidInput,
+                "memory map length can't be zero",
+            ));
+        }
+
+        unsafe {
+            let ptr = libc::mmap(
+                ptr::null_mut(),
+                aligned_len as libc::size_t,
+                prot,
+                flags,
+                file,
+                aligned_offset as libc::off_t,
+            );
+
+            if ptr == libc::MAP_FAILED {
+                Err(io::Error::last_os_error())
+            } else {
+                Ok(MmapInner {
+                    ptr: ptr.offset(alignment as isize),
+                    len,
+                })
+            }
+        }
+    }
+
+    fn map_anon(len: usize, stack: bool) -> io::Result<MmapInner> {
+        let stack = if stack { MAP_STACK } else { 0 };
+        MmapInner::new(
+            len,
+            libc::PROT_READ | libc::PROT_WRITE,
+            libc::MAP_SHARED | libc::MAP_ANON | stack,
+            -1,
+            0,
+        )
+    }
+    #[inline]
+    fn ptr(&self) -> *const u8 {
+        self.ptr as *const u8
+    }
+    #[inline]
+    fn mut_ptr(&mut self) -> *mut u8 {
+        self.ptr as *mut u8
+    }
+    #[inline]
+    fn len(&self) -> usize {
+        self.len
+    }
+}
+
+impl Drop for MmapInner {
+    fn drop(&mut self) {
+        // `ptr` was advanced past the page boundary in `new`; step back to
+        // the page start before unmapping the padded range.
+        let alignment = self.ptr as usize % page_size();
+        unsafe {
+            assert_eq!(libc::munmap(
+                self.ptr.offset(-(alignment as isize)),
+                (self.len + alignment) as libc::size_t
+            ), 0, "unable to unmap mmap: {}", io::Error::last_os_error());
+        }
+    }
+}
+
diff --git a/ylong_io/src/sys/unix/aio/lockedbuf/mod.rs b/ylong_io/src/sys/unix/aio/lockedbuf/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..6f6fc0b6394c963f15b7ba782fc836afdf89fffa
--- /dev/null
+++ b/ylong_io/src/sys/unix/aio/lockedbuf/mod.rs
@@ -0,0 +1,23 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+mod mmapmut;
+pub(crate) mod locked_buf;
+pub use locked_buf::{LifetimeExtender, LockedBuf};
+mod scoped_lock;
+
+pub(crate) fn page_size() -> usize {
+    unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize }
+}
\ No newline at end of file
diff --git a/ylong_io/src/sys/unix/aio/lockedbuf/scoped_lock.rs b/ylong_io/src/sys/unix/aio/lockedbuf/scoped_lock.rs
new file mode 100644
index 0000000000000000000000000000000000000000..e3f29950f9b6fc464c9b357957e6b1080498aa7f
--- /dev/null
+++ b/ylong_io/src/sys/unix/aio/lockedbuf/scoped_lock.rs
@@ -0,0 +1,132 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Once;
+use std::{fmt, io};
+use crate::sys::unix::aio::lockedbuf::page_size;
+
+
+pub struct ScopedLock {
+    address: *const u8,
+    size: usize,
+}
+
+impl ScopedLock {
+    fn new(address: *const u8, size: usize) -> Self {
+        ScopedLock { address, size }
+    }
+}
+
+pub(crate) fn lock(address: *const u8, size: usize) -> Result<ScopedLock, Error> {
+    if address.is_null() {
+        return Err(Error::NullAddress);
+    }
+
+    if size == 0 {
+        return Err(Error::EmptyRange);
+    }
+
+    inner_lock(
+        floor(address as usize) as *const u8,
+        size_from_range(address, size),
+    ).map(|_| ScopedLock::new(address, size))
+}
+
+pub(crate) unsafe fn unlock(address: *const u8, size: usize) -> Result<(), Error> {
+    if address.is_null() {
+        return Err(Error::NullAddress);
+    }
+
+    if size == 0 {
+        return Err(Error::EmptyRange);
+    }
+
+    inner_unlock(
+        floor(address as usize) as *const u8,
+        size_from_range(address, size),
+    )
+}
+#[inline]
+fn p_size() -> usize {
+    static INIT: Once = Once::new();
+    static mut PAGE_SIZE: usize = 0;
+    unsafe {
+        INIT.call_once(|| PAGE_SIZE = page_size());
+        PAGE_SIZE
+    }
+}
+
+#[inline]
+fn floor(address: usize) -> usize {
+    address & !(p_size() - 1)
+}
+
+#[inline]
+fn ceil(address: usize) -> usize {
+    let page_size = p_size();
+    (address + page_size - 1) & !(page_size - 1)
+}
+
+#[inline]
+fn size_from_range(address: *const u8, size: usize) -> usize {
+    let size = if size == 0 { p_size() } else { size };
+
+    ceil(address as usize % p_size() + size)
+}
+fn inner_lock(base: *const u8, size: usize) -> Result<(), Error> {
+    let result = unsafe { ::libc::mlock(base as *const ::libc::c_void, size) };
+    match result {
+        0 => Ok(()),
+        _ => Err(Error::SystemCall(io::Error::last_os_error())),
+    }
+}
+fn inner_unlock(base: *const u8, size: usize) -> Result<(), Error> {
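+    // Like mlock(2), munlock(2) works on whole pages; `unlock` has already
+    // floored the address and page-ceiled the size via `size_from_range`.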
+ let result = unsafe { ::libc::munlock(base as *const ::libc::c_void, size) }; + match result { + 0 => Ok(()), + _ => Err(Error::SystemCall(io::Error::last_os_error())), + } +} + +/// A collection of possible errors. +#[derive(Debug)] +pub enum Error { + /// The supplied address is null. + NullAddress, + /// The supplied address range is empty. + EmptyRange, + /// A system call failed. + SystemCall(io::Error), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Error::NullAddress => write!(f, "Address must not be null"), + Error::EmptyRange => write!(f, "Address range must be larger than zero"), + Error::SystemCall(ref error) => write!(f, "System call failed: {}", error), + } + } +} + +unsafe impl Send for ScopedLock {} +unsafe impl Sync for ScopedLock {} +impl Drop for ScopedLock { + fn drop(&mut self) { + let result = unsafe { unlock(self.address, self.size) }; + debug_assert!(result.is_ok(), "unlocking region"); + } +} \ No newline at end of file diff --git a/ylong_io/src/sys/unix/aio/mod.rs b/ylong_io/src/sys/unix/aio/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..6377f1f02df6d1cb4dd4bb101b8203e4f4cc7d94 --- /dev/null +++ b/ylong_io/src/sys/unix/aio/mod.rs @@ -0,0 +1,35 @@ +// Copyright (c) 2023 Huawei Device Co., Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +mod flag; +pub use flag::{ReadFlags, WriteFlags}; +mod aio_kernel; +pub use aio_kernel::{io_get_events, io_submit, io_setup, io_destroy}; +pub use aio_kernel::{AioContextT, AioResult, iocb}; +mod file_command; +pub use file_command::FileCommand; +mod noop_lock; +pub use noop_lock::{NoopLock, RawMutex as RawMutexTrait}; +mod lockedbuf; +pub use lockedbuf::{LifetimeExtender, LockedBuf}; +mod atomic_node; +pub use atomic_node::{AtomicNode, LinkedList}; +mod request_mutex; +pub use request_mutex::RequestMutex; + +mod intrusive_adapter; +mod aio_event; + +pub use aio_event::{AioEventFd, EventFdError, Sink, Stream, StreamExt}; + +pub use intrusive_adapter::{Link, RawMutex}; +pub use intrusive_adapter::{PointerOps, Adapter, DefaultLinkOps, LinkedListOps, LinkOps}; diff --git a/ylong_io/src/sys/unix/aio/noop_lock.rs b/ylong_io/src/sys/unix/aio/noop_lock.rs new file mode 100644 index 0000000000000000000000000000000000000000..40c70939fc479dd003b1ba2b431c7cc41df89eb0 --- /dev/null +++ b/ylong_io/src/sys/unix/aio/noop_lock.rs @@ -0,0 +1,58 @@ +// Copyright (c) 2023 Huawei Device Co., Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
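+//! `RawMutex` mirrors the `lock_api` trait of the same name, and `NoopLock`
+//! is its zero-cost implementation: every operation is a no-op, which is
+//! sound only because `PhantomData<*mut ()>` makes the type `!Send` and
+//! `!Sync`, confining any `RequestMutex<NoopLock, T>` to a single thread.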
+
+use std::marker::PhantomData;
+
+/// Basic operations for a mutex.
+///
+/// Types implementing this trait can be used by `Mutex` to form a safe and
+/// fully-functioning mutex type.
+///
+/// # Safety
+///
+/// Implementations of this trait must ensure that the mutex is actually
+/// exclusive: a lock can't be acquired while the mutex is already locked.
+pub unsafe trait RawMutex {
+    /// Initial value for an unlocked mutex.
+    #[allow(clippy::declare_interior_mutable_const)]
+    const INIT: Self;
+    /// Marker type which determines whether a lock guard should be `Send`. Use
+    /// one of the `GuardSend` or `GuardNoSend` helper types here.
+    type GuardMarker;
+    /// Acquires this mutex, blocking the current thread until it is able to do so.
+    fn lock(&self);
+    /// Attempts to acquire this mutex without blocking. Returns `true`
+    /// if the lock was successfully acquired and `false` otherwise.
+    fn try_lock(&self) -> bool;
+    /// Unlocks this mutex.
+    fn unlock(&self);
+}
+
+pub struct GuardSend(());
+/// An unsafe (non-thread-safe) lock, equivalent to `UnsafeCell`
+pub struct NoopLock {
+    _phantom: PhantomData<*mut ()>,
+}
+unsafe impl RawMutex for NoopLock {
+    const INIT: Self = NoopLock {
+        _phantom: PhantomData,
+    };
+    type GuardMarker = GuardSend;
+
+    fn lock(&self) {}
+
+    fn try_lock(&self) -> bool { true }
+
+    fn unlock(&self) {}
+}
+
diff --git a/ylong_io/src/sys/unix/aio/request_mutex.rs b/ylong_io/src/sys/unix/aio/request_mutex.rs
new file mode 100644
index 0000000000000000000000000000000000000000..bd17fabf06a116766e686c3d8b73242227e87515
--- /dev/null
+++ b/ylong_io/src/sys/unix/aio/request_mutex.rs
@@ -0,0 +1,113 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+use std::cell::UnsafeCell;
+use std::marker::PhantomData;
+use std::ops::{Deref, DerefMut};
+use crate::sys::RawMutexTrait;
+
+/// A mutual exclusion primitive useful for protecting shared data.
+///
+/// This mutex will block threads waiting for the lock to become available. The
+/// mutex can also be statically initialized or created via a `new`
+/// constructor. Each mutex has a type parameter which represents the data that
+/// it is protecting. The data can only be accessed through the RAII guards
+/// returned from `lock` and `try_lock`, which guarantees that the data is only
+/// ever accessed when the mutex is locked.
+pub struct RequestMutex<R: RawMutexTrait, T: ?Sized> {
+    raw: R,
+    data: UnsafeCell<T>,
+}
+
+pub struct RequestMutexGuard<'a, R: RawMutexTrait, T: ?Sized> {
+    mutex: &'a RequestMutex<R, T>,
+    marker: PhantomData<(&'a mut T, R::GuardMarker)>,
+}
+unsafe impl<R: RawMutexTrait + Send, T: ?Sized + Send> Send for RequestMutex<R, T> {}
+unsafe impl<R: RawMutexTrait + Sync, T: ?Sized + Send> Sync for RequestMutex<R, T> {}
+
+unsafe impl<'a, R: RawMutexTrait + Sync + 'a, T: ?Sized + Sync + 'a> Sync for RequestMutexGuard<'a, R, T> {}
+
+impl<R: RawMutexTrait, T> RequestMutex<R, T> {
+    /// Creates a new mutex in an unlocked state ready for use.
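+    ///
+    /// # Example
+    ///
+    /// A minimal sketch; the import path assumes the `sys` re-exports added
+    /// in this patch.
+    ///
+    /// ```no_run
+    /// use ylong_io::sys::{NoopLock, RequestMutex};
+    ///
+    /// let counter: RequestMutex<NoopLock, u32> = RequestMutex::new(0);
+    /// *counter.lock() += 1;
+    /// assert_eq!(*counter.lock(), 1);
+    /// ```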
+    pub fn new(val: T) -> RequestMutex<R, T> {
+        RequestMutex {
+            raw: R::INIT,
+            data: UnsafeCell::new(val),
+        }
+    }
+}
+
+impl<R: RawMutexTrait, T: ?Sized> RequestMutex<R, T> {
+    #[inline]
+    unsafe fn guard(&self) -> RequestMutexGuard<'_, R, T> {
+        RequestMutexGuard {
+            mutex: self,
+            marker: PhantomData,
+        }
+    }
+
+    /// Acquires a mutex, blocking the current thread until it is able to do so.
+    ///
+    /// This function will block the local thread until it is able to acquire
+    /// the mutex. Upon returning, the thread is the only thread with the mutex
+    /// held. An RAII guard is returned to allow scoped unlock of the lock. When
+    /// the guard goes out of scope, the mutex will be unlocked.
+    ///
+    /// Attempts to lock a mutex in the thread which already holds the lock will
+    /// result in a deadlock.
+    pub fn lock(&self) -> RequestMutexGuard<'_, R, T> {
+        self.raw.lock();
+        unsafe { self.guard() }
+    }
+
+    /// Forcibly unlocks the mutex.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `MutexGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `MutexGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a mutex is unlocked when not locked.
+    #[inline]
+    pub unsafe fn force_unlock(&self) {
+        self.raw.unlock();
+    }
+
+}
+
+impl<'a, R: RawMutexTrait + 'a, T: ?Sized + 'a> Deref for RequestMutexGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.mutex.data.get() }
+    }
+}
+
+impl<'a, R: RawMutexTrait + 'a, T: ?Sized + 'a> DerefMut for RequestMutexGuard<'a, R, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.mutex.data.get() }
+    }
+}
+
+impl<'a, R: RawMutexTrait + 'a, T: ?Sized + 'a> Drop for RequestMutexGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        self.mutex.raw.unlock();
+    }
+}
\ No newline at end of file
diff --git a/ylong_io/src/sys/unix/epoll.rs b/ylong_io/src/sys/unix/epoll.rs
index a49621355cb0e14a6d5de00c595cd119a3a31c28..0a698ae48dba40e111da016372f7fec90a45e215 100644
--- a/ylong_io/src/sys/unix/epoll.rs
+++ b/ylong_io/src/sys/unix/epoll.rs
@@ -55,7 +55,7 @@ impl Selector {
         // Convert to milliseconds, if input time is none, it means the timeout is -1
         // and wait permanently.
         let timeout = timeout.map(|time| time.as_millis() as c_int).unwrap_or(-1);
-
+        println!("poll poll select");
         events.clear();
 
         match syscall!(epoll_wait(
@@ -74,6 +74,7 @@
 
     /// Registers the fd with specific interested events
     pub fn register(&self, fd: i32, token: Token, interests: Interest) -> io::Result<()> {
+        println!("epoll register");
         let mut sys_event = libc::epoll_event {
             events: interests.into_io_event(),
             u64: usize::from(token) as u64,
diff --git a/ylong_io/src/sys/unix/mod.rs b/ylong_io/src/sys/unix/mod.rs
index a1627fee40ad30cbe98e67207df6d29f5573c721..a2f0c0b4a3e8de3ce698f4b9a4fe68f699e1ecfd 100644
--- a/ylong_io/src/sys/unix/mod.rs
+++ b/ylong_io/src/sys/unix/mod.rs
@@ -33,7 +33,16 @@ cfg_udp! {
     mod udp;
     pub use self::udp::{UdpSocket, ConnectedUdpSocket};
 }
-
+#[cfg(target_os = "linux")]
+cfg_aio!
{ + mod aio; + pub use self::aio::{io_get_events, io_submit, io_setup, io_destroy, + FileCommand, AioResult, AioContextT, NoopLock, RawMutexTrait, + LifetimeExtender, LockedBuf, AtomicNode, LinkedList, RequestMutex, + ReadFlags, WriteFlags, AioEventFd, EventFdError, Sink, Stream, StreamExt, + Link, RawMutex, PointerOps, Adapter, DefaultLinkOps,LinkedListOps, LinkOps,iocb + }; +} mod uds; pub use uds::{SocketAddr, UnixDatagram, UnixListener, UnixStream}; @@ -55,4 +64,5 @@ mod waker; pub(crate) use waker::WakerInner; mod source_fd; + pub use source_fd::SourceFd; diff --git a/ylong_io/src/sys/unix/source_fd.rs b/ylong_io/src/sys/unix/source_fd.rs index bc1844e827e9076427137820227f0566e4f56dc1..d8f43a5c4b72116e4795718f5baa926bbd5c4253 100644 --- a/ylong_io/src/sys/unix/source_fd.rs +++ b/ylong_io/src/sys/unix/source_fd.rs @@ -27,6 +27,7 @@ impl<'a> Source for SourceFd<'a> { token: Token, interests: Interest, ) -> io::Result<()> { + println!("Source sourcefd register"); selector.register(self.get_fd(), token, interests) } diff --git a/ylong_io/src/sys/unix/tcp/listener.rs b/ylong_io/src/sys/unix/tcp/listener.rs index a8a65baf17968e654db1ca0f49503c54a5f586c9..75aedc366dafbf274527d394fd2ec019b541e68b 100644 --- a/ylong_io/src/sys/unix/tcp/listener.rs +++ b/ylong_io/src/sys/unix/tcp/listener.rs @@ -179,6 +179,7 @@ impl Source for TcpListener { token: Token, interests: Interest, ) -> io::Result<()> { + println!("TcpListener register"); selector.register(self.get_fd(), token, interests) } diff --git a/ylong_io/src/sys/unix/tcp/stream.rs b/ylong_io/src/sys/unix/tcp/stream.rs index 4363f165b382becedc32dbb0ea18f1cb53ab218a..510eac6dd6ccb057e751383997a7cce717440403 100644 --- a/ylong_io/src/sys/unix/tcp/stream.rs +++ b/ylong_io/src/sys/unix/tcp/stream.rs @@ -283,6 +283,7 @@ impl Source for TcpStream { token: Token, interests: Interest, ) -> io::Result<()> { + println!("TcpStream Register"); selector.register(self.get_fd(), token, interests) } diff --git a/ylong_io/src/sys/unix/udp/udp_socket.rs b/ylong_io/src/sys/unix/udp/udp_socket.rs index c802237b98f49c33e1a635bc692c2cdf19007909..798b57d68ee23383ee943641857d4e54c4fb9951 100644 --- a/ylong_io/src/sys/unix/udp/udp_socket.rs +++ b/ylong_io/src/sys/unix/udp/udp_socket.rs @@ -760,6 +760,7 @@ impl Source for UdpSocket { token: Token, interests: Interest, ) -> io::Result<()> { + println!("UdpSocket register"); selector.register(self.get_fd(), token, interests) } @@ -779,6 +780,7 @@ impl Source for ConnectedUdpSocket { token: Token, interests: Interest, ) -> io::Result<()> { + println!("connectUdpSocket register"); selector.register(self.get_fd(), token, interests) } diff --git a/ylong_io/src/sys/unix/uds/datagram.rs b/ylong_io/src/sys/unix/uds/datagram.rs index d171f84caa407ec342ed8db2172f4c98ab24d489..e0709cf98d33e1c28475490f96d5defb6069661c 100644 --- a/ylong_io/src/sys/unix/uds/datagram.rs +++ b/ylong_io/src/sys/unix/uds/datagram.rs @@ -266,6 +266,7 @@ impl Source for UnixDatagram { token: Token, interests: Interest, ) -> io::Result<()> { + println!("UnixDatagram register"); selector.register(self.inner.as_raw_fd(), token, interests) } diff --git a/ylong_io/src/sys/unix/uds/listener.rs b/ylong_io/src/sys/unix/uds/listener.rs index ef1312e8e4520264e533c4eb300ed4094e40fa03..7840d7fac198121b0b0996799973769864e84953 100644 --- a/ylong_io/src/sys/unix/uds/listener.rs +++ b/ylong_io/src/sys/unix/uds/listener.rs @@ -169,6 +169,7 @@ impl Source for UnixListener { token: Token, interests: Interest, ) -> io::Result<()> { + println!("UnixListener register"); 
selector.register(self.inner.as_raw_fd(), token, interests) } diff --git a/ylong_io/src/sys/unix/uds/stream.rs b/ylong_io/src/sys/unix/uds/stream.rs index fb36191829f5e32db6e1496910cbf3c3498d9ea4..a2289faa88ef414eae5ffada4ce85ca7dc2884a6 100644 --- a/ylong_io/src/sys/unix/uds/stream.rs +++ b/ylong_io/src/sys/unix/uds/stream.rs @@ -224,6 +224,7 @@ impl Source for UnixStream { token: Token, interests: Interest, ) -> io::Result<()> { + println!("UnixStream register"); selector.register(self.inner.as_raw_fd(), token, interests) } diff --git a/ylong_io/src/sys/windows/net.rs b/ylong_io/src/sys/windows/net.rs index 0529dd1506d090e93fb3cfb74df77ea832907d01..18b8c2dab945e34f6bda1bbdf4f7b11bbc7d59e1 100644 --- a/ylong_io/src/sys/windows/net.rs +++ b/ylong_io/src/sys/windows/net.rs @@ -49,6 +49,7 @@ impl NetState { interests: Interest, socket: RawSocket, ) -> io::Result<()> { + println!("NetState register"); match self.inner { Some(_) => Err(io::ErrorKind::AlreadyExists.into()), None => selector.register(socket, token, interests).map(|state| { diff --git a/ylong_io/src/waker.rs b/ylong_io/src/waker.rs index 481216d9f267aea2d5c1d36c5ddd72663630101f..7ee8779e4c6e9c49c83ccc3371462f020a444334 100644 --- a/ylong_io/src/waker.rs +++ b/ylong_io/src/waker.rs @@ -25,10 +25,12 @@ pub struct Waker { impl Waker { /// Creates a new Waker pub fn new(poll: &Poll, token: Token) -> io::Result { + println!("io waker new"); WakerInner::new(poll.selector(), token).map(|inner| Waker { inner }) } /// Wakes up the [`Poll`] associated with this `Waker` pub fn wake(&self) -> io::Result<()> { + println!("io waker wake()"); self.inner.wake() } } diff --git a/ylong_runtime/BUILD.gn b/ylong_runtime/BUILD.gn index a96ead4403719d5c6abb8b1cfca6faddbcab642f..2cfa337f5ca3012cae4b9dc2489769d1d28a6941 100644 --- a/ylong_runtime/BUILD.gn +++ b/ylong_runtime/BUILD.gn @@ -28,6 +28,7 @@ ohos_rust_shared_library("ylong_runtime") { "signal", "sync", "time", + "aio", ] sources = [ "src/lib.rs" ] @@ -54,6 +55,7 @@ ohos_rust_static_library("ylong_runtime_static") { "signal", "sync", "time", + "aio", ] sources = [ "src/lib.rs" ] diff --git a/ylong_runtime/Cargo.toml b/ylong_runtime/Cargo.toml index 0514ed2bf99142747b7c85246305e536e53ffb72..9adeedd40f6f2cc01d73a52680c06f92e51ad079 100644 --- a/ylong_runtime/Cargo.toml +++ b/ylong_runtime/Cargo.toml @@ -20,6 +20,7 @@ full = [ "process", "fs", "macros", + "aio", ] ffrt_full = [ @@ -31,6 +32,7 @@ ffrt_full = [ "fs", "ffrt", "macros", + "aio", ] # This feature controls the executor type runs below the runtime. @@ -69,6 +71,8 @@ metrics = [] # Process component process = ["signal"] +aio = ["ylong_io/aio", "sync", "fs", "net", "macros"] + [dependencies] libc = "0.2.134" ylong_signal = { path = "../ylong_signal", optional = true } diff --git a/ylong_runtime/examples/ylong_runtime_runtime.rs b/ylong_runtime/examples/ylong_runtime_runtime.rs new file mode 100644 index 0000000000000000000000000000000000000000..8f5d72d09294a489b5587fee0765babab7ce9974 --- /dev/null +++ b/ylong_runtime/examples/ylong_runtime_runtime.rs @@ -0,0 +1,13 @@ +//! Multi_thread RuntimeBuilder usage. 
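+//!
+//! Builds a runtime with four worker threads and CPU affinity enabled, then
+//! drops it when `main` returns.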
+#![cfg(not(feature = "ffrt"))]
+use ylong_runtime::builder::RuntimeBuilder;
+
+fn main() {
+    let core_pool_size = 4;
+    let is_affinity = true;
+    let _runtime_one = RuntimeBuilder::new_multi_thread()
+        .is_affinity(is_affinity)
+        .worker_num(core_pool_size)
+        .build()
+        .unwrap();
+}
\ No newline at end of file
diff --git a/ylong_runtime/src/aio/aio_future.rs b/ylong_runtime/src/aio/aio_future.rs
new file mode 100644
index 0000000000000000000000000000000000000000..e3d3f59e442b03abfdd63379d2ba5ada69d8b933
--- /dev/null
+++ b/ylong_runtime/src/aio/aio_future.rs
@@ -0,0 +1,119 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::future::Future;
+use std::mem;
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::{Context, Poll};
+use ylong_io::sys::{DefaultLinkOps, LinkedListOps};
+use ylong_io::sys::RawMutexTrait as RawMutex;
+use ylong_io::sys::AioResult;
+use crate::aio::error::AioCommandError;
+use crate::aio::lib::AioContextInner;
+use crate::aio::requests::intrusive_adapter::IntrusiveAdapter;
+use crate::aio::requests::Request;
+use crate::sync::oneshot;
+
+pub(crate) struct AioFuture<
+    M: RawMutex,
+    A: IntrusiveAdapter,
+    L: DefaultLinkOps + Default,
+> where
+    A::LinkOps: LinkedListOps + Default,
+{
+    rx: oneshot::Receiver<AioResult>,
+    inner_context: Arc<AioContextInner<M, A, L>>,
+    request: Option<Box<Request<M, L>>>,
+}
+
+impl<
+    M: RawMutex,
+    A: IntrusiveAdapter,
+    L: DefaultLinkOps + Default,
+> AioFuture<M, A, L>
+    where
+        A::LinkOps: LinkedListOps + Default,
+{
+    fn return_request_to_pool(&mut self) {
+        let req = self.request.take().unwrap();
+        mem::drop(req.inner.lock().take_lifetime_extender());
+        self.inner_context
+            .requests
+            .lock()
+            .return_in_flight_to_ready(req);
+
+        if let Some(c) = &self.inner_context.capacity {
+            c.release()
+        }
+    }
+
+    pub(crate) fn new(
+        inner_context: &Arc<AioContextInner<M, A, L>>,
+        rx: oneshot::Receiver<AioResult>,
+        request: Box<Request<M, L>>,
+    ) -> Self {
+        AioFuture {
+            rx,
+            inner_context: inner_context.clone(),
+            request: Some(request),
+        }
+    }
+}
+
+impl<
+    M: RawMutex,
+    A: IntrusiveAdapter,
+    L: DefaultLinkOps + Default,
+> Future for AioFuture<M, A, L>
+    where
+        A::LinkOps: LinkedListOps + Default,
+{
+    type Output = Result<AioResult, AioCommandError>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let res = poll_ready!(Pin::new(&mut self.rx).poll(cx))
+            .expect("AIO stopped while AioFuture was not completed");
+        self.return_request_to_pool();
+
+        Poll::Ready(Ok(res))
+    }
+}
+
+impl<
+    M: RawMutex,
+    A: IntrusiveAdapter,
+    L: DefaultLinkOps + Default,
+> Drop for AioFuture<M, A, L>
+    where
+        A::LinkOps: LinkedListOps + Default,
+{
+    fn drop(&mut self) {
+        self.rx.close();
+
+        if self.rx.try_recv().is_ok() {
+            // The sender has successfully sent data to the channel, but we didn't accept it.
+            self.return_request_to_pool();
+        }
+
+        if let Some(in_flight) = self.request.take() {
+            self.inner_context
+                .requests
.lock() + .move_to_outstanding(in_flight) + } + } +} diff --git a/ylong_runtime/src/aio/aiofd.rs b/ylong_runtime/src/aio/aiofd.rs new file mode 100644 index 0000000000000000000000000000000000000000..a77a15ee3df26c89640d5ed7baf1fe64a06d6f3a --- /dev/null +++ b/ylong_runtime/src/aio/aiofd.rs @@ -0,0 +1,12 @@ +// Copyright (c) 2023 Huawei Device Co., Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. \ No newline at end of file diff --git a/ylong_runtime/src/aio/error.rs b/ylong_runtime/src/aio/error.rs new file mode 100644 index 0000000000000000000000000000000000000000..e0ccf2ba6b882e1fa089f8aa3585710dcb58d0f6 --- /dev/null +++ b/ylong_runtime/src/aio/error.rs @@ -0,0 +1,57 @@ +// Copyright (c) 2023 Huawei Device Co., Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::io; +use ylong_io::sys::EventFdError; +use crate::sync::SemaphoreError; + +#[derive(Debug)] +pub enum AioCommandError { + AioStopped, + IoSubmit(io::Error), + BadResult(io::Error), + NonZeroCode, + CapacityExceeded, + SemaphoreAcquire(SemaphoreError), +} + +impl From for AioContextError { + fn from(value: EventFdError) -> Self { + AioContextError::EventFd(value) + } +} + +impl From for AioContextError { + fn from(value: io::Error) -> Self { + AioContextError::IoSetup(value) + } +} + +impl From for AioContextError { + fn from(value: SemaphoreError) -> Self { + AioContextError::Semaphore(value) + } +} + +impl From for AioCommandError { + fn from(value: SemaphoreError) -> Self { + AioCommandError::SemaphoreAcquire(value) + } +} + +#[derive(Debug)] +pub enum AioContextError { + EventFd(EventFdError), + IoSetup(io::Error), + Semaphore(SemaphoreError), +} \ No newline at end of file diff --git a/ylong_runtime/src/aio/eventfd.rs b/ylong_runtime/src/aio/eventfd.rs new file mode 100644 index 0000000000000000000000000000000000000000..09291cefc66f348b19cb2d6cdd6ea642e772cbde --- /dev/null +++ b/ylong_runtime/src/aio/eventfd.rs @@ -0,0 +1,155 @@ +// Copyright (c) 2023 Huawei Device Co., Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{fmt, io, slice}; +use std::fs::File; +use std::io::{Read, Write}; +use std::ops::Deref; +use std::os::fd::{AsRawFd, FromRawFd, RawFd}; +use std::pin::Pin; +use std::task::{Context, Poll}; +use core::mem::size_of; +use libc::eventfd; +use ylong_io::Interest; +use ylong_io::sys::{AioEventFd, EventFdError, Sink, Stream}; +use crate::net::{AsyncSource}; + +pub struct EventFd { + evented: AsyncSource, + accepted: Option, +} +impl fmt::Debug for EventFd { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("EventFd").finish() + } +} +impl AsRawFd for EventFd { + fn as_raw_fd(&self) -> RawFd { + self.evented.deref().inner.as_raw_fd() + } +} +impl EventFd { + pub(crate) fn new(permits: usize, use_semaphore: bool) -> Result { + let flags = if use_semaphore { + libc::O_CLOEXEC | libc::EFD_NONBLOCK as i32 | libc::EFD_SEMAPHORE as i32 + } else { + libc::O_CLOEXEC | libc::EFD_NONBLOCK as i32 + }; + + + let fd = unsafe { + eventfd(permits as libc::c_uint, flags) + }; + + if fd < 0 { + return Err(EventFdError::CreateError(io::Error::last_os_error())); + } + + Ok(EventFd { + evented: AsyncSource::new(AioEventFd { + inner: unsafe { File::from_raw_fd(fd)}, + }, None).map_err(EventFdError::PollError)?, + accepted: None, + }) + } +} +impl Stream for EventFd { + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + println!("eventfd poll_next"); + let interest = Interest::READABLE; + + poll_ready!(self.evented.poll_ready(cx, interest)).map_err(EventFdError::PollError)?; + + let mut result = 0u64; + let result_ptr = &mut result as *mut u64 as *mut u8; + + match self + .evented + .get_mut() + .inner + .read(unsafe { slice::from_raw_parts_mut(result_ptr, 8) }) + { + Ok(rc) => { + if rc != size_of::() { + panic!( + "Reading from an eventfd should transfer exactly {} bytes", + size_of::() + ) + } + + assert_ne!(result, 0); + Poll::Ready(Some(Ok(result))) + } + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.evented + .clear_poll_ready(cx, interest) + .map_err(EventFdError::PollError)?; + + Poll::Pending + } + Err(e) => Poll::Ready(Some(Err(EventFdError::ReadError(e)))), + } + } +} +impl Sink for EventFd { + type Error = EventFdError; + + fn poll_ready(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + if self.accepted.is_none() { + Poll::Ready(Ok(())) + } else { + Poll::Pending + } + } + + fn start_send(mut self: Pin<&mut Self>, item: u64) -> Result<(), Self::Error> { + assert!(self.accepted.is_none()); + self.accepted = Some(item); + + Ok(()) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let interest = Interest::WRITABLE; + poll_ready!(self.evented.poll_ready(cx, interest)).map_err(EventFdError::PollError)?; + + { + let bytes: &mut [u8; 8] = + unsafe { &mut *(self.accepted.as_mut().unwrap() as *mut u64 as *mut [u8; 8]) }; + + match self.evented.get_mut().inner.write(bytes) { + Ok(rc) => { + assert_eq!(8, rc); + } + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.evented + .clear_poll_ready(cx, interest) + .map_err(EventFdError::PollError)?; + + return Poll::Pending; + } + Err(e) => return Poll::Ready(Err(EventFdError::ReadError(e))), + } + } + + self.accepted = None; + + Poll::Ready(Ok(())) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.poll_flush(cx) + } +} diff --git a/ylong_runtime/src/aio/fs/file.rs 
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::{fmt, io};
+use std::fs::{Metadata, Permissions};
+use std::os::fd::{AsRawFd, RawFd};
+use std::path::{Path, PathBuf};
+use ylong_io::sys::{DefaultLinkOps, FileCommand, LinkedListOps, LockedBuf};
+use ylong_io::sys::RawMutexTrait as RawMutex;
+use crate::aio::error::AioCommandError;
+use crate::aio::lib::AioContextHandle;
+use crate::aio::requests::intrusive_adapter::IntrusiveAdapter;
+use crate::fs;
+use crate::fs::OpenOptions;
+
+/// An AIO wrapper around [`ylong_runtime::fs::File`]. Provides async read/write
+/// methods.
+pub struct AioFile {
+    pub(crate) inner: fs::File,
+}
+
+impl fmt::Debug for AioFile {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("AioFile").field("inner", &self.inner).finish()
+    }
+}
+
+impl AioFile {
+    /// Opens a file in read-only mode. The O_DIRECT flag is always added;
+    /// if `is_sync` is true, the O_SYNC flag is added as well.
+    ///
+    /// See the [`OpenOptions::open`] method for more details.
+    ///
+    /// # Errors
+    ///
+    /// This function will return an error if `path` does not already exist.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use ylong_runtime::aio::AioFile;
+    ///
+    /// async fn open() -> std::io::Result<()> {
+    ///     let mut f = AioFile::open("file_aio1.txt", false).await?;
+    ///     Ok(())
+    /// }
+    /// ```
+    pub async fn open(path: impl AsRef, is_sync: bool) -> io::Result {
+        let mut open_options = OpenOptions::new();
+        open_options.read(true).write(false);
+
+        let mut path_buf = PathBuf::new();
+        path_buf.push(path);
+
+        open_options.aio_open(path_buf, is_sync).await
+    }
+
+    /// Creates a file in write-only mode. The O_DIRECT flag is always added;
+    /// if `is_sync` is true, the O_SYNC flag is added as well.
+    ///
+    /// This function will create a file if it does not exist
+    /// and truncate it if it does.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use ylong_runtime::aio::AioFile;
+    ///
+    /// async fn open() -> std::io::Result<()> {
+    ///     let mut f = AioFile::create("file_aio2.txt", false).await?;
+    ///     Ok(())
+    /// }
+    /// ```
+    pub async fn create(path: impl AsRef, is_sync: bool) -> io::Result {
+        let mut open_options = OpenOptions::new();
+        open_options.write(true).truncate(true).create(true);
+
+        let mut path_buf = PathBuf::new();
+        path_buf.push(path);
+
+        open_options.aio_open(path_buf, is_sync).await
+    }
+
+    /// Truncates or extends the underlying file, updating the size of this file
+    /// to become `size`.
+    ///
+    /// See the [`crate::fs::File::set_len`] method for more details.
+    ///
+    /// # Errors
+    ///
+    /// This function will return an error if the file is not opened for
+    /// writing.
+ /// + /// # Example + /// + /// ```no_run + /// use ylong_runtime::aio::AioFile; + /// + /// async fn set_len() -> std::io::Result<()> { + /// let mut f = AioFile::create("file_aio3.txt", false).await?; + /// f.set_len(10).await?; + /// + /// Ok(()) + /// } + /// ``` + pub async fn set_len(&mut self, size: u64) -> io::Result<()> { + self.inner.set_len(size).await + } + + /// Queries metadata about the underlying file asynchronously. + /// + /// # Examples + /// + /// ```no_run + /// use ylong_runtime::aio::AioFile; + /// + /// async fn metadata() -> std::io::Result<()> { + /// let mut f = AioFile::open("file_aio4.txt", false).await?; + /// let metadata = f.metadata().await?; + /// Ok(()) + /// } + /// ``` + pub async fn metadata(&self) -> io::Result { + self.inner.metadata().await + } + + /// Changes the permissions on the underlying file asynchronously. + /// + /// See the [`crate::fs::File::set_permissions`] method for more details. + /// + /// # Examples + /// + /// ```no_run + /// use ylong_runtime::aio::AioFile; + /// + /// async fn set_permissions() -> std::io::Result<()> { + /// let file = AioFile::open("file_aio5.txt", false).await?; + /// let mut perms = file.metadata().await?.permissions(); + /// perms.set_readonly(true); + /// file.set_permissions(perms).await?; + /// Ok(()) + /// } + /// ``` + pub async fn set_permissions(&self, perm: Permissions) -> io::Result<()> { + self.inner.set_permissions(perm).await + } + + /// Read the file through AIO at `offset` to the [`buffer`] with provided [`flags`]. + /// + /// # Examples + /// + /// ```no_run + /// use std::path::PathBuf; + /// use ylong_runtime::aio::AioFile; + /// use ylong_runtime::fs::OpenOptions; + /// use ylong_runtime::aio::aio_context; + /// use ylong_runtime::flags; + /// use ylong_io::sys::{LockedBuf, ReadFlags, WriteFlags}; + /// #[cfg(target_os = "linux")] + /// async fn aio_read() -> std::io::Result<()> { + /// let (aio, aio_handle) = aio_context(8, false).unwrap(); + /// let file_path = "file_aio6.txt"; + /// let mut open_options = OpenOptions::new(); + /// open_options.read(true).create_new(true); + /// let mut path_buf = PathBuf::new(); + /// path_buf.push(file_path); + /// let file = open_options.aio_open(path_buf, false).await.unwrap(); + /// let mut read_buf = LockedBuf::with_capacity(1024).unwrap(); + /// file.read_at(&aio_handle, 0, &mut read_buf, 1024, flags!(ReadFlags::Empty)) + /// .await.unwrap(); + /// aio.close().await; + /// Ok(()) + /// } + /// ``` + pub async fn read_at< + M: RawMutex, + A: IntrusiveAdapter, + L: DefaultLinkOps + Default, + >( + &self, + aio_handle: &AioContextHandle, + offset: u64, + buffer: &mut LockedBuf, + len: u64, + flags: u32, + ) -> Result + where + A::LinkOps: LinkedListOps + Default, + { + assert!(len <= buffer.size() as u64); + aio_handle + .submit_request( + self, + FileCommand::Read { + offset, + buffer, + flags, + len, + }, + ) + .await + } + + /// Write to the file through AIO at `offset` from the [`buffer`] with provided [`flags`]. 
+ /// + /// # Examples + /// + /// ```no_run + /// use std::path::PathBuf; + /// use ylong_runtime::aio::AioFile; + /// use ylong_runtime::fs::OpenOptions; + /// use ylong_runtime::aio::aio_context; + /// use ylong_runtime::flags; + /// use ylong_io::sys::{LockedBuf, ReadFlags, WriteFlags}; /// + /// #[cfg(target_os = "linux")] + /// async fn aio_write() -> std::io::Result<()> { + /// let (aio, aio_handle) = aio_context(8, false).unwrap(); + /// let file_path = "file_aio7.txt"; + /// let mut open_options = OpenOptions::new(); + /// open_options.create_new(true).append(true).write(true); + /// let mut path_buf = PathBuf::new(); + /// path_buf.push(file_path); + /// let file = open_options.aio_open(path_buf, false).await.unwrap(); + /// let mut write_buf = LockedBuf::with_capacity(1024).unwrap(); + /// for i in 0..write_buf.size() { + /// write_buf.as_mut()[i] = (i % 0xff) as u8; + /// } + /// file.write_at(&aio_handle, 0, &write_buf, 1024, flags!(WriteFlags::Append)) + /// .await + /// .unwrap(); + /// aio.close().await; + /// Ok(()) + /// } + /// ``` + pub async fn write_at< + M: RawMutex, + A: IntrusiveAdapter, + L: DefaultLinkOps + Default, + >( + &self, + aio_handle: &AioContextHandle, + offset: u64, + buffer: &LockedBuf, + len: u64, + flags: u32, + ) -> Result + where + A::LinkOps: LinkedListOps + Default, + { + assert!(len <= buffer.size() as u64); + aio_handle + .submit_request( + self, + FileCommand::Write { + offset, + buffer, + flags, + len, + }, + ) + .await + } + + /// Sync data and metadata through AIO + /// + /// # Examples + /// + /// ```no_run + /// use ylong_runtime::aio::AioFile; + /// use ylong_runtime::aio::aio_context; + /// use ylong_runtime::flags; + /// use ylong_io::sys::{LockedBuf, WriteFlags}; + /// + /// async fn aio_sync_all() -> std::io::Result<()> { + /// let (aio, aio_handle) = aio_context(8, false).unwrap(); + /// let mut f = AioFile::create("file_aio7.txt", false).await?; + /// let mut write_buf = LockedBuf::with_capacity(1024).unwrap(); + /// for i in 0..write_buf.size() { + /// write_buf.as_mut()[i] = (i % 0xff) as u8; + /// } + /// f.write_at(&aio_handle, 0, &write_buf, 1024, flags!(WriteFlags::Append)) + /// .await + /// .unwrap(); + /// + /// f.sync_all(&aio_handle).await?; + /// aio.close().await; + /// Ok(()) + /// } + /// ``` + pub async fn sync_all< + M: RawMutex, + A: IntrusiveAdapter, + L: DefaultLinkOps + Default, + >( + &self, + aio_handle: &AioContextHandle, + ) -> Result<(), AioCommandError> + where + A::LinkOps: LinkedListOps + Default, + { + let r = aio_handle.submit_request(self, FileCommand::Fsync).await?; + if r != 0 { + return Err(AioCommandError::NonZeroCode); + } + Ok(()) + } + + /// This is intended for use cases that must synchronize content, but don't + /// need the metadata on disk. The goal of this method is to reduce disk + /// operations. + /// + /// See the [`crate::fs::File::sync_data`] method for more details. 
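+    ///
+    /// `sync_all` submits `FileCommand::Fsync` (the AIO counterpart of
+    /// `fsync(2)`), while this method submits `FileCommand::Fdsync` (the
+    /// counterpart of `fdatasync(2)`), which may skip flushing non-essential
+    /// metadata such as timestamps.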
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use ylong_runtime::aio::AioFile;
+    /// use ylong_runtime::aio::aio_context;
+    /// use ylong_runtime::flags;
+    /// use ylong_io::sys::{LockedBuf, WriteFlags};
+    ///
+    /// async fn aio_sync_data() -> std::io::Result<()> {
+    ///     let (aio, aio_handle) = aio_context(8, false).unwrap();
+    ///     let mut f = AioFile::create("file_aio8.txt", false).await?;
+    ///     let mut write_buf = LockedBuf::with_capacity(1024).unwrap();
+    ///     for i in 0..write_buf.size() {
+    ///         write_buf.as_mut()[i] = (i % 0xff) as u8;
+    ///     }
+    ///     f.write_at(&aio_handle, 0, &write_buf, 1024, flags!(WriteFlags::Append))
+    ///         .await
+    ///         .unwrap();
+    ///
+    ///     f.sync_data(&aio_handle).await?;
+    ///     aio.close().await;
+    ///     Ok(())
+    /// }
+    /// ```
+    pub async fn sync_data<
+        M: RawMutex,
+        A: IntrusiveAdapter,
+        L: DefaultLinkOps + Default,
+    >(
+        &self,
+        aio_handle: &AioContextHandle,
+    ) -> Result<(), AioCommandError>
+    where
+        A::LinkOps: LinkedListOps + Default,
+    {
+        let r = aio_handle.submit_request(self, FileCommand::Fdsync).await?;
+        if r != 0 {
+            return Err(AioCommandError::NonZeroCode);
+        }
+        Ok(())
+    }
+}
+
+impl AsRawFd for AioFile {
+    fn as_raw_fd(&self) -> RawFd {
+        self.inner.as_raw_fd()
+    }
+}
+
+impl AsRawFd for &'_ AioFile {
+    fn as_raw_fd(&self) -> RawFd {
+        self.inner.as_raw_fd()
+    }
+}
diff --git a/ylong_runtime/src/aio/fs/mod.rs b/ylong_runtime/src/aio/fs/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..b224aab3e31fe8a9eddc831f4625358f709ddfb4 --- /dev/null +++ b/ylong_runtime/src/aio/fs/mod.rs @@ -0,0 +1,14 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+mod file;
+pub use file::AioFile;
\ No newline at end of file
diff --git a/ylong_runtime/src/aio/lib.rs b/ylong_runtime/src/aio/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..d0c66130dfb93546750b23852f426c232ae4e83c --- /dev/null +++ b/ylong_runtime/src/aio/lib.rs @@ -0,0 +1,610 @@
+// Copyright (c) 2023 Huawei Device Co., Ltd.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
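+
+//! Linux AIO plumbing. In short: `io_setup(2)` creates the context; every
+//! submitted `iocb` carries `IOCB_FLAG_RESFD`, so the kernel reports
+//! completions through an eventfd; a background task reads that eventfd,
+//! drains the completed events with `io_getevents(2)`, and forwards each
+//! result to the oneshot channel of the future that submitted the request.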
+ +use std::{fmt, io, ptr}; +use std::future::Future; +use std::os::fd::{AsRawFd, RawFd}; +use std::sync::{Arc, Weak}; +use ylong_io::sys::{AtomicNode, DefaultLinkOps, FileCommand, iocb, Link, LinkedListOps, NoopLock, RawMutexTrait as RawMutex, RequestMutex}; +use ylong_io::sys::{AioContextT}; +use ylong_io::sys::{io_destroy, io_setup, io_submit, io_get_events}; +use ylong_io::sys::StreamExt; +use crate::spawn; +use crate::aio::aio_future::AioFuture; +use crate::aio::error::{AioCommandError, AioContextError}; +use crate::aio::eventfd::EventFd; +use crate::aio::requests::intrusive_adapter::{IntrusiveAdapter, LocalRequestAdapter, SyncRequestAdapter}; +use crate::aio::requests::{Request, Requests}; +use crate::sync::{Semaphore, oneshot}; +use crate::task::yield_now; + +pub(crate) struct AioContextInner< + M: RawMutex, + A: IntrusiveAdapter, + L: DefaultLinkOps + Default, +> where + A::LinkOps: LinkedListOps + Default, +{ + context: AioContextT, + eventfd: RawFd, + num_slots: usize, + pub(crate) capacity: Option, + pub(crate) requests: RequestMutex>, + stop_tx: RequestMutex>>, +} + +impl< + M: RawMutex, + A: IntrusiveAdapter, + L: DefaultLinkOps + Default, +> AioContextInner + where + A::LinkOps: LinkedListOps + Default, +{ + fn new( + eventfd: RawFd, + nr: usize, + use_semaphore: bool, + stop_tx: oneshot::Sender<()>, + ) -> Result, AioContextError> { + let mut context: AioContextT = 0; + + unsafe { + if io_setup(nr as libc::c_long, &mut context) != 0 { + return Err(AioContextError::IoSetup(io::Error::last_os_error())); + } + }; + + Ok(AioContextInner { + context, + requests: RequestMutex::new(Requests::new(nr)?), + eventfd, + capacity: if use_semaphore { + Some(Semaphore::new(nr)?) + } else { + None + }, + stop_tx: RequestMutex::new(Some(stop_tx)), + num_slots: nr, + }) + } +} + +impl< + M: RawMutex, + A: IntrusiveAdapter, + L: DefaultLinkOps + Default, +> Drop for AioContextInner + where + A::LinkOps: LinkedListOps + Default, +{ + fn drop(&mut self) { + let result = unsafe { io_destroy(self.context) }; + if result != 0 { + panic!("io_destroy returned bad code") + } + } +} + +pub struct AioContext< + M: RawMutex, + A: IntrusiveAdapter, + L: DefaultLinkOps + Default, +> where + A::LinkOps: LinkedListOps + Default, +{ + inner: Arc>, +} + +impl< + M: RawMutex, + A: IntrusiveAdapter, + L: DefaultLinkOps + Default, +> fmt::Debug for AioContext + where + A::LinkOps: LinkedListOps + Default, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("AioContext") + .field("num_slots", &self.inner.num_slots) + .finish() + } +} + +pub struct AioContextHandle< + M: RawMutex, + A: IntrusiveAdapter, + L: DefaultLinkOps + Default, +> where + A::LinkOps: LinkedListOps + Default, +{ + inner: Weak>, +} + +impl< + M: RawMutex, + A: IntrusiveAdapter, + L: DefaultLinkOps + Default, +> Clone for AioContextHandle + where + A::LinkOps: LinkedListOps + Default, +{ + fn clone(&self) -> Self { + AioContextHandle { + inner: self.inner.clone(), + } + } +} + +impl< + M: RawMutex, + A: IntrusiveAdapter, + L: DefaultLinkOps + Default, +> AioContextHandle + where + A::LinkOps: LinkedListOps + Default, +{ + /// Number of available AIO slots left in the context + /// + /// Return None if AIO context stopped, or if `use_semaphore` + /// was set to `false` + pub fn available_slots(&self) -> Option { + self.inner + .upgrade() + .and_then(|i| i.capacity.as_ref().map(|c| c.current_permits())) + } + + // Submit command to the AIO context + pub(crate) async fn submit_request( + &self, + fd: &impl AsRawFd, + mut 
command: FileCommand<'_>,
+    ) -> Result {
+        let inner_context = self
+            .inner
+            .upgrade()
+            .ok_or(AioCommandError::AioStopped)?;
+
+        if let Some(cap) = &inner_context.capacity {
+            cap.acquire().await?;
+        }
+
+        let mut request = inner_context
+            .requests
+            .lock()
+            .take()
+            .ok_or(AioCommandError::CapacityExceeded)?;
+
+        let request_addr = request.aio_addr();
+
+        let (tx, rx) = oneshot::channel();
+
+        let result = {
+            let mut request_ptr_array: [*mut iocb; 1] = [ptr::null_mut(); 1];
+
+            request.set_request(
+                &mut request_ptr_array,
+                request_addr,
+                inner_context.eventfd,
+                fd.as_raw_fd(),
+                &mut command,
+                tx,
+            );
+
+            unsafe {
+                io_submit(
+                    inner_context.context,
+                    1,
+                    request_ptr_array.as_mut_ptr(),
+                )
+            }
+        };
+
+        if result != 1 {
+            drop(request.inner.lock().take_lifetime_extender());
+            inner_context
+                .requests
+                .lock()
+                .return_in_flight_to_ready(request);
+            return Err(AioCommandError::IoSubmit(io::Error::last_os_error()));
+        }
+
+        let base = AioFuture::new(&inner_context, rx, request);
+
+        let code = base.await?;
+        if code < 0 {
+            Err(AioCommandError::BadResult(io::Error::from_raw_os_error(
+                -code as _,
+            )))
+        } else {
+            Ok(code.try_into().unwrap())
+        }
+    }
+}
+
+impl<
+    M: RawMutex,
+    A: IntrusiveAdapter,
+    L: DefaultLinkOps + Default,
+> fmt::Debug for AioContextHandle
+    where
+        A::LinkOps: LinkedListOps + Default,
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("AioContextHandle").finish()
+    }
+}
+
+/// Creates a new AIO context with room for `nr` concurrent requests.
+pub fn generic_aio_context(
+    nr: usize,
+    use_semaphore: bool,
+) -> Result<
+    (
+        AioContext,
+        AioContextHandle,
+        impl Future>,
+    ),
+    AioContextError,
+>
+    where
+        A: IntrusiveAdapter,
+        A::LinkOps: LinkedListOps + Default,
+        L: DefaultLinkOps + Default,
+        M: RawMutex,
+{
+    let mut eventfd = EventFd::new(0, use_semaphore)?;
+    let (stop_tx, stop_rx) = oneshot::channel();
+
+    let inner = Arc::new(AioContextInner::new(
+        eventfd.as_raw_fd(),
+        nr,
+        use_semaphore,
+        stop_tx,
+    )?);
+
+    let context = inner.context;
+
+    let poll_future = {
+        let inner = inner.clone();
+
+        async move {
+            let mut events = Vec::with_capacity(nr);
+
+            while let Some(Ok(available)) = eventfd.next().await {
+                assert!(available > 0, "kernel reported zero ready events");
+                assert!(
+                    available <= nr as u64,
+                    "kernel reported more events than the maximum number of requests"
+                );
+
+                unsafe {
+                    let num_received = io_get_events(
+                        context,
+                        available as libc::c_long,
+                        available as libc::c_long,
+                        events.as_mut_ptr(),
+                        ptr::null_mut::(),
+                    );
+
+                    if num_received < 0 {
+                        return Err(io::Error::last_os_error());
+                    }
+
+                    assert!(
+                        num_received == available as _,
+                        "io_getevents returned a different number of events than the eventfd reported"
+                    );
+                    events.set_len(available as usize);
+                };
+
+                for event in &events {
+                    let request_ptr = event.data as usize as *mut Request;
+
+                    let sent_succeeded = unsafe { &mut *request_ptr }.send(event.res);
+
+                    if !sent_succeeded {
+                        drop(
+                            unsafe { &*request_ptr }
+                                .inner
+                                .lock()
+                                .take_lifetime_extender(),
+                        );
+                        inner
+                            .requests
+                            .lock()
+                            .return_outstanding_to_ready(request_ptr);
+                    }
+                }
+            }
+
+            Ok(())
+        }
+    };
+
+    let background = async move {
+        pin_mut!(poll_future);
+
+        select_inner!
{
+            res = poll_future => res,
+            _ = stop_rx => Ok(()),
+        }
+    };
+
+    let handle = AioContextHandle {
+        inner: Arc::downgrade(&inner),
+    };
+
+    Ok((AioContext { inner }, handle, background))
+}
+
+impl<
+    M: RawMutex,
+    A: IntrusiveAdapter,
+    L: DefaultLinkOps + Default,
+> AioContext
+    where
+        A::LinkOps: LinkedListOps + Default,
+{
+    /// Number of available AIO slots left in the context.
+    pub fn available_slots(&self) -> Option {
+        self.inner.capacity.as_ref().map(|c| c.current_permits())
+    }
+
+    /// Closes the AIO context and waits for all related running futures to complete.
+    pub async fn close(self) {
+        self.inner.stop_tx.lock().take().unwrap().send(()).unwrap();
+        // Yield until the background poller drops its Arc, i.e. until this is
+        // the last reference to the context.
+        while Arc::strong_count(&self.inner) != 1 {
+            yield_now().await;
+        }
+    }
+}
+
+/// Creates a new AIO context suitable for a cross-threaded environment.
+#[inline]
+pub fn aio_context(
+    nr: usize,
+    use_semaphore: bool,
+) -> Result<(GlobalAioContext, GlobalAioContextHandle), AioContextError> {
+    let (aio_context, aio_handle, background) = generic_aio_context(nr, use_semaphore)?;
+    spawn(background);
+    Ok((aio_context, aio_handle))
+}
+/// AIO context suitable for a cross-threaded environment.
+pub type GlobalAioContext =
+    AioContext;
+/// AIO context handle suitable for a cross-threaded environment.
+pub type GlobalAioContextHandle =
+    AioContextHandle;
+/// Creates a new AIO context suitable for a single-threaded environment.
+#[inline]
+pub fn local_aio_context(
+    nr: usize,
+    use_semaphore: bool,
+) -> Result<
+    (
+        LocalAioContext,
+        LocalAioContextHandle,
+        impl Future>,
+    ),
+    AioContextError,
+> {
+    generic_aio_context(nr, use_semaphore)
+}
+/// AIO context suitable for a single-threaded environment.
+pub type LocalAioContext = AioContext;
+
+/// AIO context handle suitable for a single-threaded environment.
+pub type LocalAioContextHandle =
+    AioContextHandle;
+
+#[cfg(test)]
+mod test {
+    use std::ffi::c_void;
+    use std::path::PathBuf;
+    use crate::fs::OpenOptions;
+    use ylong_io::sys::{LockedBuf, ReadFlags, WriteFlags};
+    use crate::aio::error::AioContextError;
+    use crate::aio::lib::{generic_aio_context, GlobalAioContext, GlobalAioContextHandle};
+    use crate::spawn;
+    use crate::block_on;
+
+    #[test]
+    fn ut_global_aio_test1() {
+        let file_path = "aio_file.txt";
+
+        let handle = spawn(async { get_aio_context().await });
+        let (aio, aio_handle) = block_on(handle).unwrap().unwrap();
+
+        let handle2 = spawn(async move {
+            aio_task(file_path, &aio_handle).await
+        });
+        block_on(handle2).unwrap();
+
+        let handle3 = spawn(async move {
+            close_aio(aio).await
+        });
+        block_on(handle3).unwrap();
+
+        std::fs::remove_file(file_path).unwrap();
+    }
+
+    async fn get_aio_context() -> Result<(GlobalAioContext, GlobalAioContextHandle), AioContextError> {
+        let (aio_context, aio_handle, background) = generic_aio_context(3, true)?;
+        spawn(background);
+        Ok((aio_context, aio_handle))
+    }
+
+    async fn close_aio(aio_context: GlobalAioContext) {
+        aio_context.close().await;
+    }
+    async fn aio_task(file_path: &str, aio_handle: &GlobalAioContextHandle) {
+        println!("aio task start");
+
+        let mut open_options = OpenOptions::new();
+        open_options
+            .read(true)
+            .create_new(true)
+            .append(true)
+            .write(true);
+
+        let mut path_buf = PathBuf::new();
+        path_buf.push(file_path);
+
+        let file = open_options
+            .aio_open(path_buf, false).await.unwrap();
+
+        let mut write_buf = LockedBuf::with_capacity(512).unwrap();
+        for i in 0..write_buf.size() {
+            write_buf.as_mut()[i] = 0;
} + println!("write_at action"); + file.write_at(aio_handle, 0, &write_buf, 512, flags!(WriteFlags::Append)) + .await + .unwrap(); + + let mut read_buf = LockedBuf::with_capacity(512).unwrap(); + + file.read_at(aio_handle, 0, &mut read_buf, 512, flags!(ReadFlags::Empty)) + .await.unwrap(); + + assert_eq!(read_buf.as_ref(), write_buf.as_ref()); + + println!("all good!"); + } + + + use std::{io, ptr}; + use std::os::fd::{AsRawFd, RawFd}; + use libc::{__u32, __u64, c_long, c_uint, EFD_NONBLOCK, eventfd, O_CLOEXEC, sigset_t, SYS_io_uring_enter, SYS_io_uring_register, SYS_io_uring_setup, syscall}; + + + const SYSCALL_REGISTER: c_long = SYS_io_uring_register; + const SYSCALL_SETUP: c_long = SYS_io_uring_setup; + const SYSCALL_ENTER: c_long = SYS_io_uring_enter; + const IORING_REGISTER_EVENTFD: BindgenTy7 = 4; + const IORING_ENTER_GETEVENTS: u32 = 1; + // const IORING_REGISTER_BUFFERS: BindgenTy7 = 0; + // const IORING_ENTER_SQ_WAIT: u32 = 4; + type BindgenTy7 = c_uint; + #[repr(C)] + #[derive(Debug, Default, Copy, Clone)] + pub struct io_uring_params { + pub sq_entries: __u32, + pub cq_entries: __u32, + pub flags: __u32, + pub sq_thread_cpu: __u32, + pub sq_thread_idle: __u32, + pub features: __u32, + pub wq_fd: __u32, + pub resv: [__u32; 3usize], + pub sq_off: io_sqring_offsets, + pub cq_off: io_cqring_offsets, + } + #[repr(C)] + #[derive(Debug, Default, Copy, Clone)] + pub struct io_sqring_offsets { + pub head: __u32, + pub tail: __u32, + pub ring_mask: __u32, + pub ring_entries: __u32, + pub flags: __u32, + pub dropped: __u32, + pub array: __u32, + pub resv1: __u32, + pub user_addr: __u64, + } + #[repr(C)] + #[derive(Debug, Default, Copy, Clone)] + pub struct io_cqring_offsets { + pub head: __u32, + pub tail: __u32, + pub ring_mask: __u32, + pub ring_entries: __u32, + pub overflow: __u32, + pub cqes: __u32, + pub flags: __u32, + pub resv1: __u32, + pub user_addr: __u64, + } + #[test] + fn ut_global_aio_test2() { + let mut parameters = io_uring_params { + sq_entries: 0, + cq_entries: 0, + flags: 0, + sq_thread_cpu: 0, + sq_thread_idle: 0, + features: 0, + wq_fd: 0, + resv: Default::default(), + sq_off: Default::default(), + cq_off: Default::default(), + }; + let p = &mut parameters as *mut io_uring_params; + + + let ret = unsafe { libc::syscall(SYSCALL_SETUP, 256 as c_long, p as c_long)}; + println!("ret {:?}",ret); + to_result(ret).unwrap(); + let flags = O_CLOEXEC | EFD_NONBLOCK as i32; + let fd = unsafe { + eventfd(3 as c_uint, flags) + }.as_raw_fd(); + let p = cast_ptr::(&fd).cast() as *const c_void; + let ret2 = unsafe {syscall(SYSCALL_REGISTER, ret as c_long, IORING_REGISTER_EVENTFD as c_long, + p as c_long, 1 as c_long)}; + println!("ret2 {}",ret2); + to_result(ret2 as _).unwrap(); + let p = ptr::null::().cast() as *const c_void; + let ret3 = unsafe { + syscall(SYSCALL_ENTER, + ret as c_long, + 0 as c_long, + 0 as c_long, + IORING_ENTER_GETEVENTS as c_long, + p as c_long, + 128 as c_long,) + }; + println!("ret3 {}",ret3); + to_result(ret3 as _).unwrap(); + println!("finish"); + } + + fn to_result(ret: c_long) -> io::Result { + if ret >= 0 { + Ok(ret) + } else { + Err(io::Error::last_os_error()) + } + } + fn cast_ptr(n: &T) -> *const T { + n + } +} + + diff --git a/ylong_runtime/src/aio/mod.rs b/ylong_runtime/src/aio/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..027f720bd30b1ba814b7895f57e4e97f1ade60a6 --- /dev/null +++ b/ylong_runtime/src/aio/mod.rs @@ -0,0 +1,331 @@ +// Copyright (c) 2023 Huawei Device Co., Ltd. 
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Linux AIO support.
+
+/// AIO command flags. Multiple flags can be passed in simultaneously.
+///
+/// # Examples
+/// ```no_run
+/// use ylong_io::{FileCommand, WriteFlags};
+/// use ylong_runtime::flags;
+///
+/// fn do_task() {
+///     let command = flags!(WriteFlags::Append, WriteFlags::Sync);
+/// }
+/// ```
+#[macro_export]
+macro_rules! flags {
+    ($($t: expr),*) => {
+        {
+            let mut result = 0u32;
+            $(
+                result |= $t as u32;
+            )*
+            result
+        }
+    };
+}
+
+macro_rules! poll_ready {
+    ($e:expr) => {
+        match $e {
+            std::task::Poll::Ready(t) => t,
+            std::task::Poll::Pending => return Poll::Pending,
+        }
+    };
+}
+
+macro_rules! pin_mut {
+    ($($x:ident),* $(,)?) => { $(
+        // Move the value to ensure that it is owned
+        let mut $x = $x;
+        // Shadow the original binding so that it can't be directly accessed
+        // ever again.
+        #[allow(unused_mut)]
+        let mut $x = unsafe {
+            core::pin::Pin::new_unchecked(&mut $x)
+        };
+    )* }
+}
+
+macro_rules! container_of {
+    ($ptr:expr, $container:path, $field:ident) => {
+        #[allow(clippy::cast_ptr_alignment)]
+        {
+            ($ptr as *const _ as *const u8).sub(offset_of!($container, $field))
+                as *const $container
+        }
+    };
+}
+
+// With a newer toolchain this could simply delegate to `std::mem::offset_of!`:
+// macro_rules! offset_of {
+//     ($parent:path, $field:tt) => {{
+//         std::mem::offset_of!($parent, $field)
+//     }};
+// }
+macro_rules! offset_of {
+    ($parent:path, $field:tt) => {{
+        // Get a base pointer (non-dangling if rustc supports `MaybeUninit`).
+        let_base_ptr!(base_ptr, $parent);
+        // Get field pointer.
+        let field_ptr = raw_field!(base_ptr, $parent, $field);
+        // Compute offset.
+        offset_from_unsafe!(field_ptr, base_ptr)
+    }};
+}
+
+macro_rules! offset_from_unsafe {
+    ($field:expr, $base:expr) => {
+        // Compute offset.
+        ($field as usize) - ($base as usize)
+    };
+}
+macro_rules! raw_field {
+    ($base:expr, $parent:path, $field:tt) => {{
+        field_check!($parent, $field);
+        let base = $base; // evaluate $base outside the `unsafe` block
+
+        // Get the field address.
+        // Crucially, we know that this will not trigger a deref coercion because
+        // of the field check we did above.
+        #[allow(unused_unsafe)] // for when the macro is used in an unsafe block
+        unsafe {
+            addr_of!((*(base as *const $parent)).$field)
+        }
+    }};
+}
+
+macro_rules! addr_of {
+    ($path:expr) => {{
+        std::ptr::addr_of!($path)
+    }};
+}
+macro_rules! field_check {
+    ($type:path, $field:tt) => {
+        // Make sure the field actually exists. This line ensures that a
+        // compile-time error is generated if $field is accessed through a
+        // Deref impl.
+        #[allow(clippy::unneeded_field_pattern)]
+        let $type { $field: _, .. };
+    };
+}
+macro_rules! let_base_ptr {
+    ($name:ident, $type:ty) => {
+        // No UB here, and the pointer does not dangle, either.
+        // But we have to make sure that `uninit` lives long enough,
+        // so it has to be in the same scope as `$name`. That's why
+        // `let_base_ptr` declares a variable (several, actually)
+        // instead of returning one.
+        let uninit = std::mem::MaybeUninit::<$type>::uninit();
+        let $name: *const $type = uninit.as_ptr();
+    };
+}
+macro_rules! select_inner {
+    ( {
+        // Branch from which the execution starts.
+        random = $bool:expr;
+        // Branch count.
+        ( $count:expr, $($_n:tt)* )
+        // ( index:expr ) Branch's index
+        $( ( $index:expr, $($_i:tt)* ) $bind:pat = $fut:expr, if $c:expr => $handle:expr, )+
+        // When all branches fail, this is executed;
+        ; $else:expr
+
+    }) => {{
+        use std::future::Future;
+        use std::pin::Pin;
+        use std::task::Poll::{Ready, Pending};
+
+        enum Out {
+            Finish(T),
+            Fail,
+        }
+
+        let branches_count: usize = $count;
+
+        // The ith element indicates whether the ith branch is available.
+        let mut match_result = [true; $count];
+        // Handle preconditions; this step cannot be handled within poll_fn().
+        $(
+            if (!$c)
+            {
+                match_result[$index] = false;
+            }
+        )*
+
+        // When a branch becomes ready first, this variable is set to that
+        // branch's index to ensure that the branch is executed first.
+        use $crate::fastrand::fast_random;
+        let mut random_number = fast_random() as usize;
+
+        let output = {
+            let mut futures = ( $( $fut , )+ );
+            let futures = &mut futures;
+
+            $crate::futures::poll_fn(|cx| {
+                let mut anyone_pending = false;
+                let random = $bool;
+
+                for i in 0..branches_count {
+                    let branch = match random {
+                        true => {(random_number + i) % branches_count },
+                        false => i
+                    };
+
+                    $(
+                        if (branch == $index && match_result[branch])
+                        {
+                            let ( $($_i,)* fut, .. ) = &mut *futures;
+                            let fut = unsafe { Pin::new_unchecked(fut) };
+                            match Future::poll(fut, cx) {
+                                Ready(out) => {
+                                    // Check if the returned value matches the user input.
+                                    match &out {
+                                        // If the match succeeds, the inner value is never
+                                        // used, so unused_variables has to be allowed here.
+                                        #[allow(unused_variables)]
+                                        $bind => {
+                                            // Update random_number so that when this returns ready, this branch is executed first.
+                                            random_number = branch;
+                                            return Ready($crate::tuple_form!(($count) with Out::Fail except Out::Finish(out) at ($($_i)*)))
+                                        },
+                                        // If the match fails, mark this branch unavailable and wait for another branch to complete.
+                                        // When the user pattern matches every value, this arm is unreachable.
+                                        #[allow(unreachable_patterns)]
+                                        _ => {
+                                            match_result[branch] = false;
+                                            continue;
+                                        }
+                                    }
+                                },
+                                Pending => {
+                                    anyone_pending = true;
+                                    continue;
+                                }
+                            };
+                        }
+                    )*
+                }
+
+                if anyone_pending {
+                    Pending
+                } else {
+                    Ready($crate::tuple_form!(($count) with Out::Fail except Out::Fail at ($($_n)*)))
+                }
+            }).await
+        };
+
+        match output {
+            $(
+                $crate::tuple_form!(($count) with Out::Fail except Out::Finish($bind) at ($($_i)*)) => $handle,
+            )*
+            $crate::tuple_form!(($count) with Out::Fail except Out::Fail at ($($_n)*)) => $else,
+            // If there is only one branch and the user match for that branch returns `_`,
+            // there will be an unreachable pattern alert.
+            #[allow(unreachable_patterns)]
+            _ => unreachable!("finally match fail"),
+        }
+    }};
+
+    // If there is no 'else' branch, add the default 'else' branch.
+    ( { random = $bool:expr; $($t:tt)* } ) => {
+        select_inner!({ random = $bool; $($t)*; panic!("select!: All the branches failed.") })
+    };
+    // If there is an 'else' branch, add the 'else' branch into {}.
+    ( { random = $bool:expr; $($t:tt)* } else => $else:expr $(,)?) => {
+        select_inner!({ random = $bool; $($t)*; $else })
+    };
+    // Recursively join a branch into {}.
+    // The branch is separated by ',', has an 'if' condition, and ends with a block.
+    ( { random = $bool:expr; ( $s:expr, $($_n:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:block, $($r:tt)* ) => {
+        select_inner!({ random = $bool; ( $s + 1, $($_n)*_) $($t)* ($s, $($_n)*) $p = $f, if $c => $h, } $($r)*)
+    };
+    // Recursively join a branch into {}.
+    // The branch is separated by ',', does not have an 'if' condition, and ends with a block.
+    ( { random = $bool:expr; ( $s:expr, $($_n:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:block, $($r:tt)* ) => {
+        select_inner!({ random = $bool; ( $s + 1, $($_n)*_) $($t)* ($s, $($_n)*) $p = $f, if true => $h, } $($r)*)
+    };
+    // Recursively join a branch into {}.
+    // The branch is separated by ' ', has an 'if' condition, and ends with a block.
+    ( { random = $bool:expr; ( $s:expr, $($_n:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:block $($r:tt)* ) => {
+        select_inner!({ random = $bool; ( $s + 1, $($_n)*_) $($t)* ($s, $($_n)*) $p = $f, if $c => $h, } $($r)*)
+    };
+    // Recursively join a branch into {}.
+    // The branch is separated by ' ', does not have an 'if' condition, and ends with a block.
+    ( { random = $bool:expr; ( $s:expr, $($_n:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:block $($r:tt)* ) => {
+        select_inner!( { random = $bool; ( $s + 1, $($_n)*_) $($t)* ($s, $($_n)*) $p = $f, if true => $h, } $($r)*)
+    };
+    // Recursively join a branch into {}.
+    // The branch is separated by ',', has an 'if' condition, and ends with an expression.
+    ( { random = $bool:expr; ( $s:expr, $($_n:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:expr, $($r:tt)* ) => {
+        select_inner!({ random = $bool; ( $s + 1, $($_n)*_ ) $($t)* ($s, $($_n)*) $p = $f, if $c => $h, } $($r)*)
+    };
+    // Recursively join a branch into {}.
+    // The branch is separated by ',', does not have an 'if' condition, and ends with an expression.
+    ( { random = $bool:expr; ( $s:expr, $($_n:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:expr, $($r:tt)* ) => {
+        select_inner!({ random = $bool; ( $s + 1, $($_n)*_ ) $($t)* ($s, $($_n)*) $p = $f, if true => $h, } $($r)*)
+    };
+    // Recursively join the last branch into {}.
+    // The branch has an 'if' condition and ends with an expression.
+    // If the branch ends with an expression, it can't be separated by ' '.
+    ( { random = $bool:expr; ( $s:expr, $($_n:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:expr ) => {
+        select_inner!({ random = $bool; ( $s + 1, $($_n)*_ ) $($t)* ($s, $($_n)*) $p = $f, if $c => $h, })
+    };
+    // Recursively join the last branch into {}.
+    // The branch does not have an 'if' condition and ends with an expression.
+    // If the branch ends with an expression, it can't be separated by ' '.
+    ( { random = $bool:expr; ( $s:expr, $($_n:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:expr ) => {
+        select_inner!({ random = $bool; ( $s + 1, $($_n)*_ ) $($t)* ($s, $($_n)*) $p = $f, if true => $h, })
+    };
+
+    // Usage entry. Starts with the first branch.
+    (biased; $p:pat = $($t:tt)* ) => {
+        select_inner!({ random = false; ( 0,) } $p = $($t)*)
+    };
+    // Usage entry. Starts with a random branch.
+    ( $p:pat = $($t:tt)* ) => {
+        select_inner!({ random = true; ( 0,) } $p = $($t)*)
+    };
+    // There are no branches.
+    () => {
+        compile_error!("select!
requires at least one branch.") + }; +} + +mod lib; +pub use lib::{aio_context, local_aio_context}; +mod eventfd; +pub mod requests; +mod fs; +pub use fs::AioFile; +mod aio_future; +mod error; +mod aiofd; + + + + + + + + + + + + + + + diff --git a/ylong_runtime/src/aio/requests/intrusive_adapter/adapter.rs b/ylong_runtime/src/aio/requests/intrusive_adapter/adapter.rs new file mode 100644 index 0000000000000000000000000000000000000000..15f89fa798919364e671963b25d54b53e851993b --- /dev/null +++ b/ylong_runtime/src/aio/requests/intrusive_adapter/adapter.rs @@ -0,0 +1,226 @@ +// Copyright (c) 2023 Huawei Device Co., Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::marker::PhantomData; +use ylong_io::sys::{Adapter, DefaultLinkOps, LinkOps, NoopLock, PointerOps}; +use ylong_io::sys::RawMutex; +use ylong_io::sys::Link; +use ylong_io::sys::RawMutexTrait; +use ylong_io::sys::AtomicNode; +use crate::aio::requests::Request; + +pub struct DefaultPointerOps(PhantomData); + +impl DefaultPointerOps { + #[inline] + pub const fn new() -> DefaultPointerOps { + DefaultPointerOps(PhantomData) + } +} + +impl Clone for DefaultPointerOps { + #[inline] + fn clone(&self) -> Self { + *self + } +} + +impl Copy for DefaultPointerOps {} + +impl Default for DefaultPointerOps { + #[inline] + fn default() -> Self { + Self::new() + } +} + +unsafe impl PointerOps for DefaultPointerOps> { + type Value = T; + type Pointer = Box; + + #[inline] + unsafe fn value_to_pointer(&self, raw: *const T) -> Box { + Box::from_raw(raw as *mut T) + } + + #[inline] + fn pointer_to_value(&self, ptr: Box) -> *const T { + Box::into_raw(ptr) as *const T + } +} + +/// Intrusive adapter suitable for storing `Request` +pub trait IntrusiveAdapter: + Adapter>>> +where + M: RawMutexTrait, + L: DefaultLinkOps + Default, +{ + /// Create new intrusive adapter + fn new() -> Self; +} + +/// SyncRequestAdapter +pub struct SyncRequestAdapter { + link_ops: ::Ops, + pointer_ops: DefaultPointerOps>>, +} +unsafe impl Send for SyncRequestAdapter {} +unsafe impl Sync for SyncRequestAdapter {} + +impl Copy for SyncRequestAdapter {} + +impl Clone for SyncRequestAdapter { + #[inline] + fn clone(&self) -> Self { + *self + } +} + +impl Default for SyncRequestAdapter { + #[inline] + fn default() -> Self { + Self::NEW + } +} + +impl SyncRequestAdapter { + /// Init SyncRequestAdapter + pub const NEW: Self = SyncRequestAdapter { + link_ops: AtomicNode::NEW, + pointer_ops: DefaultPointerOps::>>::new(), + }; + + /// Create new SyncRequestAdapter + #[inline] + pub fn new() -> Self { + Self::NEW + } +} +unsafe impl Adapter for SyncRequestAdapter { + type LinkOps = ::Ops; + type PointerOps = DefaultPointerOps>>; + + #[inline] + unsafe fn get_value(&self, link: ::LinkPtr) -> *const ::Value { + container_of!(link.as_ptr(),Request, link_node) + } + + #[inline] + unsafe fn get_link(&self, value: *const ::Value) -> ::LinkPtr { + let ptr = (value as *const u8).add(offset_of!(Request, link_node)); + core::ptr::NonNull::new_unchecked(ptr as *mut _) + } + + 
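+    // Note: `get_value` and `get_link` must be exact inverses: `get_link`
+    // offsets a `Request` pointer forward by the byte offset of `link_node`,
+    // and `get_value` (via `container_of!`) subtracts that same offset, so
+    // `get_value(get_link(p)) == p` holds for every request pointer `p`.
+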
#[inline] + fn link_ops(&self) -> &Self::LinkOps { + &self.link_ops + } + + #[inline] + fn link_ops_mut(&mut self) -> &mut Self::LinkOps { + &mut self.link_ops + } + + #[inline] + fn pointer_ops(&self) -> &Self::PointerOps { + &self.pointer_ops + } +} + +/// LocalRequestAdapter +pub struct LocalRequestAdapter { + link_ops: ::Ops, + pointer_ops: DefaultPointerOps>>, +} +unsafe impl Send for LocalRequestAdapter {} +unsafe impl Sync for LocalRequestAdapter {} + +impl Copy for LocalRequestAdapter {} + +impl Clone for LocalRequestAdapter { + #[inline] + fn clone(&self) -> Self { + *self + } +} + +impl Default for LocalRequestAdapter { + #[inline] + fn default() -> Self { + Self::NEW + } +} + +impl LocalRequestAdapter { + /// Init LocalRequestAdapter + pub const NEW: Self = LocalRequestAdapter { + link_ops: Link::NEW, + pointer_ops: DefaultPointerOps::>>::new(), + }; + + /// Create new LocalRequestAdapter + #[inline] + pub fn new() -> Self { + Self::NEW + } +} +unsafe impl Adapter for LocalRequestAdapter { + type LinkOps = ::Ops; + type PointerOps = DefaultPointerOps::>>; + + #[inline] + unsafe fn get_value(&self, link: ::LinkPtr) -> *const ::Value { + container_of!(link.as_ptr(),Request, link_node) + } + + #[inline] + unsafe fn get_link(&self, value: *const ::Value) -> ::LinkPtr { + let ptr = (value as *const u8).add(offset_of!(Request, link_node)); + core::ptr::NonNull::new_unchecked(ptr as *mut _) + } + + #[inline] + fn link_ops(&self) -> &Self::LinkOps { + &self.link_ops + } + + #[inline] + fn link_ops_mut(&mut self) -> &mut Self::LinkOps { + &mut self.link_ops + } + + #[inline] + fn pointer_ops(&self) -> &Self::PointerOps { + &self.pointer_ops + } +} + +impl IntrusiveAdapter for SyncRequestAdapter { + fn new() -> Self { + SyncRequestAdapter::new() + } +} + +impl IntrusiveAdapter for LocalRequestAdapter { + fn new() -> Self { + LocalRequestAdapter::new() + } +} + + + + + + + diff --git a/ylong_runtime/src/aio/requests/intrusive_adapter/mod.rs b/ylong_runtime/src/aio/requests/intrusive_adapter/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..e09fb567e9b3eb64ff76965a2f571e4d34e43ed5 --- /dev/null +++ b/ylong_runtime/src/aio/requests/intrusive_adapter/mod.rs @@ -0,0 +1,19 @@ +// Copyright (c) 2023 Huawei Device Co., Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! intrusive_adapter +mod adapter; + +pub use adapter::{SyncRequestAdapter, LocalRequestAdapter, IntrusiveAdapter}; + + diff --git a/ylong_runtime/src/aio/requests/mod.rs b/ylong_runtime/src/aio/requests/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..ea78452fea8a2c291b5552e0efb2c7a03ed12166 --- /dev/null +++ b/ylong_runtime/src/aio/requests/mod.rs @@ -0,0 +1,165 @@ +// Copyright (c) 2023 Huawei Device Co., Ltd. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! AIO request to kernel + +use std::marker::PhantomData; +use std::{io, mem}; +use std::os::fd::RawFd; +use ylong_io::sys::{AioResult, DefaultLinkOps, FileCommand, iocb, LifetimeExtender, LinkedList, LinkedListOps, RequestMutex}; +use ylong_io::sys::RawMutexTrait as RawMutex; +use crate::aio::requests::intrusive_adapter::IntrusiveAdapter; +use crate::sync::oneshot; + +pub mod intrusive_adapter; +pub(crate) const IOCB_FLAG_RESFD: u32 = 1; + +pub(crate) struct RequestInner { + pub aio_request: iocb, + pub completed_tx: Option>, + pub lifetime_extender: Option, +} + +impl RequestInner { + pub(crate) fn take_lifetime_extender(&mut self) -> Option { + self.lifetime_extender.take() + } +} + +/// Aio Request +pub struct Request { + link_node: L, + pub(crate) inner: RequestMutex, +} + +impl Default for Request { + fn default() -> Self { + Request { + link_node: Default::default(), + inner: RequestMutex::new(RequestInner { + aio_request: unsafe { mem::zeroed() }, + completed_tx: None, + lifetime_extender: None, + }), + } + } +} + +impl Request { + + // Get the Request memory address and convert it to u64. + pub(crate) fn aio_addr(&self) -> u64 { + (self as *const Self as usize ) as u64 + } + + // Send data to waiter + pub(crate) fn send(&mut self, data: AioResult) -> bool { + self.inner + .lock() + .completed_tx + .take() + .expect("No completed_tx in received AIO request") + .send(data) + .is_ok() + } + + pub(crate) fn set_request( + &mut self, + request_ptr_array: &mut [*mut iocb; 1], + request_addr: u64, + eventfd: RawFd, + fd: RawFd, + command: &mut FileCommand, + tx: oneshot::Sender, + ) { + let inner = &mut *self.inner.lock(); + + let (addr, buf_size) = command.buffer_addr().unwrap_or((0, 0)); + let len = command.len().unwrap_or(0); + + assert!(len <= buf_size, "len should be <= buffer.size()"); + + inner.aio_request.aio_data = request_addr; + inner.aio_request.aio_resfd = eventfd as u32; + inner.aio_request.aio_flags = IOCB_FLAG_RESFD | command.flags().unwrap_or(0); + inner.aio_request.aio_fildes = fd as u32; + inner.aio_request.aio_offset = command.offset().unwrap_or(0) as i64; + inner.aio_request.aio_buf = addr; + inner.aio_request.aio_nbytes = len; + inner.aio_request.aio_lio_opcode = command.operation_code() as u16; + + inner.lifetime_extender = command.lifetime_extender(); + inner.completed_tx = Some(tx); + request_ptr_array[0] = &mut inner.aio_request as *mut iocb; + } +} + +/// AIO request pool +pub struct Requests< + M: RawMutex, + A: IntrusiveAdapter, + L: DefaultLinkOps + Default, +> where + A:: LinkOps: LinkedListOps + Default , +{ + ready_pool: LinkedList, + outstanding: LinkedList, + _request_mutex: PhantomData, + _link_ops: PhantomData, +} + +impl Requests +where + M: RawMutex, + A: IntrusiveAdapter, + A::LinkOps: LinkedListOps + Default, + L: DefaultLinkOps + Default, +{ + pub(crate) fn new(nr: usize) -> Result { + let outstanding = LinkedList::new(A::new()); + let mut ready_pool = LinkedList::new(A::new()); + + for _ in 0..nr { + ready_pool.push_back(Box::>::default()); + } + + Ok(Requests { + ready_pool, + outstanding, + _request_mutex: 
Default::default(),
+            _link_ops: Default::default(),
+        })
+    }
+
+    pub(crate) fn move_to_outstanding(&mut self, ptr: Box>) {
+        self.outstanding.push_back(ptr);
+    }
+
+    pub(crate) fn return_outstanding_to_ready(&mut self, request: *const Request) {
+        let mut cursor = unsafe { self.outstanding.cursor_mut_from_ptr(request) };
+
+        self.ready_pool.push_back(
+            cursor.remove().expect(
+                "Could not find item in outstanding list while trying to move to ready_pool",
+            ),
+        );
+    }
+
+    pub(crate) fn return_in_flight_to_ready(&mut self, req: Box>) {
+        self.ready_pool.push_back(req);
+    }
+
+    pub(crate) fn take(&mut self) -> Option>> {
+        self.ready_pool.pop_front()
+    }
+}
diff --git a/ylong_runtime/src/executor/parker.rs b/ylong_runtime/src/executor/parker.rs index 2c3f2da1e9e675a3d8119e287bd65e2ec2d64ef9..977693d39b41d15db8bdecf8c28e8aebd805fa07 100644 --- a/ylong_runtime/src/executor/parker.rs +++ b/ylong_runtime/src/executor/parker.rs @@ -108,7 +108,6 @@ impl Inner {
             }
             Err(actual) => panic!("inconsistent park state; actual = {actual}"),
         }
-
         let park_flag = driver.run();

         match self.state.swap(IDLE, SeqCst) {
diff --git a/ylong_runtime/src/fs/open_options.rs b/ylong_runtime/src/fs/open_options.rs index 687ecf3048a9160db3d514d50dfb8059d40faa19..617a8fd9d50f9fda6771052a4cab29d1aa3e0204 100644 --- a/ylong_runtime/src/fs/open_options.rs +++ b/ylong_runtime/src/fs/open_options.rs @@ -13,8 +13,13 @@
 use std::fs::OpenOptions as SyncOpenOptions;
 use std::io;
+#[cfg(all(target_os = "linux", feature = "aio"))]
+use std::path::PathBuf;
+#[cfg(all(target_os = "linux", feature = "aio"))]
+use std::os::unix::prelude::OpenOptionsExt;
 use std::path::Path;
-
+#[cfg(all(target_os = "linux", feature = "aio"))]
+use crate::aio::AioFile;
 use crate::fs::{async_op, File};

 /// An asynchronous version of the [`std::fs::OpenOptions`];
@@ -298,6 +303,16 @@ impl OpenOptions {
         let file = async_op(move || options.open(path)).await?;
         Ok(File::new(file))
     }
+
+    #[cfg(all(target_os = "linux", feature = "aio"))]
+    pub(crate) async fn aio_open(mut self, path: PathBuf, is_sync: bool) -> io::Result<AioFile> {
+        // `custom_flags` replaces any previously set custom flags rather than
+        // OR-ing them in, so O_DIRECT and O_SYNC must be combined into a
+        // single call.
+        let mut flags = libc::O_DIRECT;
+        if is_sync {
+            flags |= libc::O_SYNC;
+        }
+        self.0.custom_flags(flags);
+        let aio_file = self.open(path).await?;
+        Ok(AioFile { inner: aio_file })
+    }
 }
 #[cfg(all(test, target_os = "linux"))]
 mod test {
diff --git a/ylong_runtime/src/lib.rs b/ylong_runtime/src/lib.rs index 528ae8621740ed95657fedab3c9ebc9b2d9e447f..660ba77eef6c1802537d4f6df02e5d8c926c92b1 100644 --- a/ylong_runtime/src/lib.rs +++ b/ylong_runtime/src/lib.rs @@ -12,7 +12,6 @@
 // limitations under the License.

 #![warn(missing_docs)]
-
 //! # ylong_runtime
 //! A runtime for writing IO-bounded and CPU-bounded applications.
@@ -82,6 +81,11 @@ cfg_net! {
     pub mod net;
 }

+#[cfg(target_os = "linux")]
+cfg_aio! {
+    pub mod aio;
+}
+
 #[cfg(target_os = "linux")]
 cfg_process! {
     pub mod process;
diff --git a/ylong_runtime/src/macros.rs b/ylong_runtime/src/macros.rs index ae06bd49f6d1ecbe82ed60b0d5910aa96890355f..b7fd70376bf3d39914b90717295071ffa8a8cb20 100644 --- a/ylong_runtime/src/macros.rs +++ b/ylong_runtime/src/macros.rs @@ -10,6 +10,15 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
+#[cfg(target_os = "linux")]
+macro_rules! cfg_aio {
+    ($($item:item)*) => {
+        $(
+            #[cfg(feature = "aio")]
+            $item
+        )*
+    }
+}

 macro_rules! cfg_net {
     ($($item:item)*) => {
diff --git a/ylong_runtime/src/net/async_source.rs b/ylong_runtime/src/net/async_source.rs index a7b35df289f25b2baa97bc0b9c8066847c583f6e..c89adbbf0e9a032f79ea3e8740879a5976e114f3 100644 --- a/ylong_runtime/src/net/async_source.rs +++ b/ylong_runtime/src/net/async_source.rs @@ -17,6 +17,8 @@
 use std::io::{Read, Write};
 use std::mem::MaybeUninit;
 use std::ops::Deref;
 use std::sync::Arc;
+#[cfg(all(target_os = "linux", feature = "aio"))]
+use std::sync::atomic::Ordering::Relaxed;
 use std::task::{Context, Poll};

 #[cfg(target_os = "linux")]
@@ -102,11 +105,25 @@
     }

 cfg_net! {
+    #[cfg(all(target_os = "linux", feature = "aio"))]
+    pub(crate) fn clear_poll_ready(
+        &self,
+        cx: &mut Context<'_>,
+        interest: Interest,
+    ) -> io::Result<()> {
+        self.entry.status.fetch_and(!interest.as_usize(), Relaxed);
+        if self.poll_ready(cx, interest)?.is_ready() {
+            cx.waker().wake_by_ref();
+        }
+        Ok(())
+    }
+
     pub(crate) fn poll_ready(
         &self,
         cx: &mut Context<'_>,
         interest: Interest,
     ) -> Poll> {
         let ready = self.entry.poll_readiness(cx, interest);
         let x = match ready {
             Poll::Ready(x) => x,
@@ -224,6 +241,11 @@
             })
         }
     }
+
+    #[cfg(all(target_os = "linux", feature = "aio"))]
+    pub(crate) fn get_mut(&mut self) -> &mut E {
+        self.io.as_mut().unwrap()
+    }
 }

 impl Debug for AsyncSource {
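Read end to end, the patch composes as in the sketch below, distilled from `ut_global_aio_test1`: create a context, open an O_DIRECT file, do a write/read round trip through a `LockedBuf`, and close the context. Everything used here (`aio_context`, `AioFile`, `LockedBuf`, the `flags!` macro) is introduced by this patch; the file name and the 512-byte length are arbitrary choices for the sketch, and AIO errors are unwrapped for brevity.

use ylong_io::sys::{LockedBuf, ReadFlags, WriteFlags};
use ylong_runtime::aio::{aio_context, AioFile};
use ylong_runtime::flags;

async fn aio_roundtrip() -> std::io::Result<()> {
    // Eight request slots, guarded by a semaphore so a ninth submission waits.
    let (aio, aio_handle) = aio_context(8, true).unwrap();

    // `create` always sets O_DIRECT, so transfers go through a LockedBuf.
    let file = AioFile::create("aio_roundtrip.txt", false).await?;

    let mut write_buf = LockedBuf::with_capacity(512).unwrap();
    for i in 0..write_buf.size() {
        write_buf.as_mut()[i] = (i % 0xff) as u8;
    }
    file.write_at(&aio_handle, 0, &write_buf, 512, flags!(WriteFlags::Append))
        .await
        .unwrap();

    let mut read_buf = LockedBuf::with_capacity(512).unwrap();
    file.read_at(&aio_handle, 0, &mut read_buf, 512, flags!(ReadFlags::Empty))
        .await
        .unwrap();
    assert_eq!(write_buf.as_ref(), read_buf.as_ref());

    // Closing stops the background poller and waits for in-flight futures.
    aio.close().await;
    Ok(())
}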