profile
viewpoint

japaric/xargo 782

The sysroot manager that lets you build and customize `std`

fortanix/rust-sgx 228

The Fortanix Rust Enclave Development Platform

fortanix/rust-mbedtls 28

Idiomatic Rust wrapper for MbedTLS

JAndritsch/mtgextractor 20

A Ruby gem to extract MTG card and set data from the Gatherer web site

jethrogb/audittools 4

Tools to interpret auditd logs

jethrogb/b64 1

Base64 encoding/decoding in Rust, originally from rustc-serialize.

jethrogb/cloudproxy 1

The CloudProxy Tao for Trustworthy Computing

akash-fortanix/ring 0

Safe, fast, small crypto using Rust

Pull request review comment fortanix/rust-sgx

Implement async queues

 impl EnclaveState {             usercall_ext,             threads_queue,             forward_panics,+            fifo_guards: Mutex::new(None),+            return_queue_tx: Mutex::new(None),         })     } +    async fn handle_usercall(+        enclave: Arc<EnclaveState>,+        work_sender: crossbeam::crossbeam_channel::Sender<Work>,+        tx_return_channel: tokio::sync::mpsc::UnboundedSender<(EnclaveResult, EnclaveEntry)>,+        mut handle_data: UsercallHandleData,+    ) {+        let (parameters, mode, tcs) = match handle_data {+            UsercallHandleData::Sync(ref usercall, ref mut tcs, _) => (usercall.parameters(), tcs.mode, Some(tcs)),+            UsercallHandleData::Async(ref usercall)                => (usercall.data.parameters(), EnclaveEntry::ExecutableNonMain, None),+        };+        let mut input = IOHandlerInput { enclave: enclave.clone(), tcs, work_sender: &work_sender };+        let handler = Handler(&mut input);+        let (_handler, result) = {+            let (p1, p2, p3, p4, p5) = parameters;+            dispatch(handler, p1, p2, p3, p4, p5).await+        };+        let ret = match result {+            Ok(ret) => {+                match handle_data {+                    UsercallHandleData::Sync(usercall, tcs, _) => {+                        work_sender.send(Work {+                            tcs,+                            entry: CoEntry::Resume(usercall, ret),+                        }).expect("Work sender couldn't send data to receiver");+                    }+                    UsercallHandleData::Async(usercall) => {+                        let return_queue_tx = enclave.return_queue_tx.lock().await.clone().expect("return_queue_tx not initialized");+                        let ret = Identified {+                            id: usercall.id,+                            data: Return(ret.0, ret.1),+                        };+                        return_queue_tx.send(ret).await.unwrap();+                    }+                }+  
              return;+            }+            Err(EnclaveAbort::Exit { panic: true }) => {+                let panic = match handle_data {+                    UsercallHandleData::Sync(usercall, _, debug_buf) => {+                        println!("Attaching debugger");+                        #[cfg(unix)]+                        trap_attached_debugger(usercall.tcs_address() as _).await;+                        EnclavePanic::from(debug_buf.into_inner())+                    }+                    UsercallHandleData::Async(_) => {+                        // FIXME: find a better panic message

Just point to https://github.com/fortanix/rust-sgx/issues/235#issuecomment-641811437 which will fix this

mzohreva

comment created time in 3 days

Pull request review comment fortanix/rust-sgx

Implement async queues

 struct StoppedTcs { }  struct IOHandlerInput<'tcs> {-    tcs: &'tcs mut RunningTcs,+    tcs: Option<&'tcs mut RunningTcs>,     enclave: Arc<EnclaveState>,     work_sender: &'tcs crossbeam::crossbeam_channel::Sender<Work>, } +struct PendingEvents {+    counts: [u32; Self::MAX_EVENT],+}

This reorders the delivery of events compared to the current situation (events with fewer bits set will trigger first). I don't think that matters too much, but is there any specific reason for changing this?

mzohreva

comment created time in 3 days

Pull request review comment fortanix/rust-sgx

Implement async queues

 struct StoppedTcs { }  struct IOHandlerInput<'tcs> {-    tcs: &'tcs mut RunningTcs,+    tcs: Option<&'tcs mut RunningTcs>,     enclave: Arc<EnclaveState>,     work_sender: &'tcs crossbeam::crossbeam_channel::Sender<Work>, } +struct PendingEvents {+    counts: [u32; Self::MAX_EVENT],+}++impl PendingEvents {+    const MAX_EVENT: usize = 8;++    fn new() -> Self {+        // sanity check to ensure we update MAX_EVENT if new events are added in the future+        const EV_ALL: u64 = EV_USERCALLQ_NOT_FULL | EV_RETURNQ_NOT_EMPTY | EV_UNPARK;+        assert!(EV_ALL < Self::MAX_EVENT as u64);+        assert!(Self::MAX_EVENT <= 1usize + u8::max_value() as usize);

You can do all this at compile time.

// Will error if it doesn't fit in a `u64`
const EV_MAX_U64: u64 = (EV_USERCALLQ_NOT_FULL | EV_RETURNQ_NOT_EMPTY | EV_UNPARK) + 1;
const EV_MAX: usize = EV_MAX_U64 as _;
// Will error if it doesn't fit in a `usize`
const _ERROR_IF_USIZE_TOO_SMALL: u64 = u64::MAX + (EV_MAX_U64 - (EV_MAX as u64));

But realistically, having an array element for each event combination will fail long before there's any chance of overflows.

mzohreva

comment created time in 3 days

Pull request review comment fortanix/rust-sgx

Implement async queues

 impl EnclaveState {             usercall_ext,             threads_queue,             forward_panics,+            fifo_guards: Mutex::new(None),+            return_queue_tx: Mutex::new(None),         })     } +    async fn handle_usercall(+        enclave: Arc<EnclaveState>,+        work_sender: crossbeam::crossbeam_channel::Sender<Work>,+        tx_return_channel: tokio::sync::mpsc::UnboundedSender<(EnclaveResult, EnclaveEntry)>,+        mut handle_data: UsercallHandleData,+    ) {+        let (parameters, mode, tcs) = match handle_data {+            UsercallHandleData::Sync(ref usercall, ref mut tcs, _) => (usercall.parameters(), tcs.mode, Some(tcs)),+            UsercallHandleData::Async(ref usercall)                => (usercall.data.parameters(), EnclaveEntry::ExecutableNonMain, None),

You should create a new mode, since this call is not related to an entry.

That will also provide clarity on handling of enclave aborts triggered by async usercalls.

mzohreva

comment created time in 3 days

Pull request review comment fortanix/rust-sgx

Implement async queues

 impl<'tcs> IOHandlerInput<'tcs> {             }         }; -        let mut ret = None;--        if (self.tcs.pending_event_set & event_mask) != 0 {-            if let Some(pos) = self-                .tcs-                .pending_events-                .iter()-                .position(|ev| (ev & event_mask) != 0)-            {-                ret = self.tcs.pending_events.remove(pos);-                self.tcs.pending_event_set = self.tcs.pending_events.iter().fold(0, |m, ev| m | ev);-            }-        }+        // TODO: the ABI allows for calling wait() asynchronously with specific semantics

Please fix or file a GH issue and link to it

mzohreva

comment created time in 3 days

Pull request review comment fortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, WithId};+use std::cell::UnsafeCell;+use std::mem;+use std::ptr::NonNull;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Arc;++pub fn bounded<T, S>(len: usize, s: S) -> (Sender<T, S>, Receiver<T, S>)+where+    T: Transmittable,+    S: Synchronizer,+{+    let arc = Arc::new(Fifo::new(len));+    let inner = FifoInner::from_arc(arc);+    let tx = Sender { inner: inner.clone(), synchronizer: s.clone() };+    let rx = Receiver { inner, synchronizer: s };+    (tx, rx)+}++pub fn bounded_async<T, S>(len: usize, s: S) -> (AsyncSender<T, S>, AsyncReceiver<T, S>)+where+    T: Transmittable,+    S: AsyncSynchronizer,+{+    let arc = Arc::new(Fifo::new(len));+    let inner = FifoInner::from_arc(arc);+    let tx = AsyncSender { inner: inner.clone(), synchronizer: s.clone() };+    let rx = AsyncReceiver { inner, synchronizer: s };+    (tx, rx)+}++pub(crate) struct Fifo<T> {

Please rename

  • Fifo -> FifoBuffer
  • FifoInner -> Fifo
mzohreva

comment created time in 3 days

Pull request review comment fortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, WithId};+use std::cell::UnsafeCell;+use std::mem;+use std::ptr::NonNull;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Arc;++pub fn bounded<T, S>(len: usize, s: S) -> (Sender<T, S>, Receiver<T, S>)+where+    T: Transmittable,+    S: Synchronizer,+{+    let arc = Arc::new(Fifo::new(len));+    let inner = FifoInner::from_arc(arc);+    let tx = Sender { inner: inner.clone(), synchronizer: s.clone() };+    let rx = Receiver { inner, synchronizer: s };+    (tx, rx)+}++pub fn bounded_async<T, S>(len: usize, s: S) -> (AsyncSender<T, S>, AsyncReceiver<T, S>)+where+    T: Transmittable,+    S: AsyncSynchronizer,+{+    let arc = Arc::new(Fifo::new(len));+    let inner = FifoInner::from_arc(arc);+    let tx = AsyncSender { inner: inner.clone(), synchronizer: s.clone() };+    let rx = AsyncReceiver { inner, synchronizer: s };+    (tx, rx)+}++pub(crate) struct Fifo<T> {+    data: Box<[WithId<T>]>,+    offsets: Box<AtomicUsize>,+}++impl<T: Transmittable> Fifo<T> {+    fn new(len: usize) -> Self {+        assert!(+            len.is_power_of_two(),+            "Fifo len should be a power of two"+        );+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, || WithId { id: AtomicU64::new(0), data: T::default() });+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }+}++enum Storage<T> {+    Shared(Arc<Fifo<T>>),+    Static,+}++impl<T> Clone for Storage<T> {+    fn clone(&self) -> Self {+        match self {+            Storage::Shared(arc) => Storage::Shared(arc.clone()),+            Storage::Static => Storage::Static,+        }+    }+}++pub(crate) struct 
FifoInner<T> {+    data: NonNull<[UnsafeCell<WithId<T>>]>,+    offsets: NonNull<AtomicUsize>,+    storage: Storage<T>,+}++impl<T> Clone for FifoInner<T> {+    fn clone(&self) -> Self {+        Self {+            data: self.data.clone(),+            offsets: self.offsets.clone(),+            storage: self.storage.clone(),+        }+    }+}++impl<T: Transmittable> FifoInner<T> {+    pub(crate) unsafe fn from_descriptor(descriptor: FifoDescriptor<T>) -> Self {+        assert!(+            descriptor.len.is_power_of_two(),+            "Fifo len should be a power of two"+        );+        #[cfg(target_env = "sgx")] {+            use std::os::fortanix_sgx::usercalls::alloc::User;++            // `fortanix_sgx_abi::WithId` is not `Copy` because it contains an `AtomicU64`.+            // This type has the same memory layout but is `Copy` and can be marked as+            // `UserSafeSized` which is needed for the `User::from_raw_parts()` below.+            #[repr(C)]+            #[derive(Clone, Copy)]+            pub struct WithId<T> {+                pub id: u64,+                pub data: T,+            }+            unsafe impl<T: UserSafeSized> UserSafeSized for WithId<T> {}++            unsafe fn _sanity_check_with_id() {+                use std::mem::size_of;+                let _: [u8; size_of::<fortanix_sgx_abi::WithId<()>>()] = [0u8; size_of::<WithId<()>>()];+            }++            // check pointers are outside enclave range, etc.+            let data = User::<[WithId<T>]>::from_raw_parts(descriptor.data as _, descriptor.len);+            mem::forget(data);+        }+        let data_slice = std::slice::from_raw_parts_mut(descriptor.data, descriptor.len);+        Self {+            data: NonNull::new_unchecked(data_slice as *mut [WithId<T>] as *mut [UnsafeCell<WithId<T>>]),+            offsets: NonNull::new_unchecked(descriptor.offsets as *mut AtomicUsize),+            storage: Storage::Static,+        }+    }++    fn from_arc(fifo: Arc<Fifo<T>>) -> Self {+     
   unsafe {+            Self {+                data: NonNull::new_unchecked(fifo.data.as_ref() as *const [WithId<T>] as *mut [WithId<T>] as *mut [UnsafeCell<WithId<T>>]),+                offsets: NonNull::new_unchecked(fifo.offsets.as_ref() as *const AtomicUsize as *mut AtomicUsize),+                storage: Storage::Shared(fifo),+            }+        }+    }++    /// Consumes `self` and returns a DescriptorGuard.+    /// Panics if `self` was created using `from_descriptor`.+    pub(crate) fn into_descriptor_guard(self) -> DescriptorGuard<T> {+        let arc = match self.storage {+            Storage::Shared(arc) => arc,+            Storage::Static => panic!("Sender/Receiver created using `from_descriptor()` cannot be turned into DescriptorGuard."),+        };+        let data = unsafe { self.data.as_ref() };+        let descriptor = FifoDescriptor {+            data: data.as_ptr() as _,+            len: data.len(),+            offsets: self.offsets.as_ptr(),+        };+        DescriptorGuard { descriptor, _fifo: arc }+    }++    fn slot(&self, index: usize) -> &mut WithId<T> {+        unsafe { &mut *self.data.as_ref()[index].get() }+    }++    fn data_len(&self) -> usize {+        unsafe { self.data.as_ref().len() }+    }++    fn offsets(&self) -> &AtomicUsize {+        unsafe { self.offsets.as_ref() }+    }

These accessor functions are unnecessary and, in the case of slot, hide too much unsafety.

mzohreva

comment created time in 3 days

Pull request review comment fortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, WithId};+use std::cell::UnsafeCell;+use std::mem;+use std::ptr::NonNull;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Arc;++pub fn bounded<T, S>(len: usize, s: S) -> (Sender<T, S>, Receiver<T, S>)+where+    T: Transmittable,+    S: Synchronizer,+{+    let arc = Arc::new(Fifo::new(len));+    let inner = FifoInner::from_arc(arc);+    let tx = Sender { inner: inner.clone(), synchronizer: s.clone() };+    let rx = Receiver { inner, synchronizer: s };+    (tx, rx)+}++pub fn bounded_async<T, S>(len: usize, s: S) -> (AsyncSender<T, S>, AsyncReceiver<T, S>)+where+    T: Transmittable,+    S: AsyncSynchronizer,+{+    let arc = Arc::new(Fifo::new(len));+    let inner = FifoInner::from_arc(arc);+    let tx = AsyncSender { inner: inner.clone(), synchronizer: s.clone() };+    let rx = AsyncReceiver { inner, synchronizer: s };+    (tx, rx)+}++pub(crate) struct Fifo<T> {+    data: Box<[WithId<T>]>,+    offsets: Box<AtomicUsize>,+}++impl<T: Transmittable> Fifo<T> {+    fn new(len: usize) -> Self {+        assert!(+            len.is_power_of_two(),+            "Fifo len should be a power of two"+        );+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, || WithId { id: AtomicU64::new(0), data: T::default() });+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }+}++enum Storage<T> {+    Shared(Arc<Fifo<T>>),+    Static,+}++impl<T> Clone for Storage<T> {+    fn clone(&self) -> Self {+        match self {+            Storage::Shared(arc) => Storage::Shared(arc.clone()),+            Storage::Static => Storage::Static,+        }+    }+}++pub(crate) struct 
FifoInner<T> {+    data: NonNull<[UnsafeCell<WithId<T>>]>,+    offsets: NonNull<AtomicUsize>,

I don't know if it's useful to have these as NonNull. Just make them &'static ... instead.

mzohreva

comment created time in 3 days

Pull request review comment fortanix/rust-sgx

Implement async queues

 impl EnclaveState {         };         let enclave_clone = enclave.clone();         let io_future = async move {+            let (usercall_queue_synchronizer, return_queue_synchronizer, sync_usercall_tx) = QueueSynchronizer::new(enclave_clone.clone());++            let (usercall_queue_tx, usercall_queue_rx) = ipc_queue::bounded_async(USERCALL_QUEUE_SIZE, usercall_queue_synchronizer);+            let (return_queue_tx, return_queue_rx) = ipc_queue::bounded_async(RETURN_QUEUE_SIZE, return_queue_synchronizer);++            let fifo_guards = FifoGuards {+                usercall_queue: usercall_queue_tx.into_descriptor_guard(),+                return_queue: return_queue_rx.into_descriptor_guard(),+                async_queues_called: false,+            };++            *enclave_clone.fifo_guards.lock().await = Some(fifo_guards);+            *enclave_clone.return_queue_tx.lock().await = Some(return_queue_tx);++            tokio::task::spawn_local(async move {+                while let Ok(usercall) = usercall_queue_rx.recv().await {+                    let _ = io_queue_send.send(UsercallSendData::Async(usercall));+                }+            });

Instead of the forwarding, can you do something like recv_queue.select(usercall_queue_rx.recv()) in the loop below?

mzohreva

comment created time in 6 days

Pull request review comment fortanix/rust-sgx

Implement async queues

 impl Usercalls { /// [`EV_RETURNQ_NOT_EMPTY`]: ../constant.EV_RETURNQ_NOT_EMPTY.html pub mod async {     use super::*;-    use core::sync::atomic::AtomicUsize;+    use core::sync::atomic::{AtomicU64, AtomicUsize}; -    /// An identified usercall.     #[repr(C)]-    #[derive(Copy, Clone)]-    #[cfg_attr(feature = "rustc-dep-of-std", unstable(feature = "sgx_platform", issue = "56975"))]-    pub struct Usercall {-        /// `0` indicates this slot is empty.-        pub id: u64,-        /// The elements correspond to the RDI, RSI, RDX, R8, and R9 registers-        /// in the synchronous calling convention.-        pub args: (u64, u64, u64, u64, u64)+    pub struct WithId<T> {+        pub id: AtomicU64,+        pub data: T,     } -    /// The return value of an identified usercall.+    /// A usercall.+    /// The elements correspond to the RDI, RSI, RDX, R8, and R9 registers+    /// in the synchronous calling convention.     #[repr(C)]-    #[derive(Copy, Clone)]+    #[derive(Copy, Clone, Default)]     #[cfg_attr(feature = "rustc-dep-of-std", unstable(feature = "sgx_platform", issue = "56975"))]-    pub struct Return {-        /// `0` indicates this slot is empty.-        pub id: u64,-        /// The elements correspond to the RSI and RDX registers in the-        /// synchronous calling convention.-        pub value: (u64, u64)+    pub struct Usercall(pub u64, pub u64, pub u64, pub u64, pub u64);++    impl Usercall {

Instead, can you impl From<Usercall> for (u64, u64, u64, u64, u64) and vice-versa? And the same for Return?

mzohreva

comment created time in 3 days

Pull request review comment fortanix/rust-sgx

Implement async queues

 impl Usercalls { /// [`EV_RETURNQ_NOT_EMPTY`]: ../constant.EV_RETURNQ_NOT_EMPTY.html pub mod async {     use super::*;-    use core::sync::atomic::AtomicUsize;+    use core::sync::atomic::{AtomicU64, AtomicUsize}; -    /// An identified usercall.     #[repr(C)]-    #[derive(Copy, Clone)]-    #[cfg_attr(feature = "rustc-dep-of-std", unstable(feature = "sgx_platform", issue = "56975"))]-    pub struct Usercall {-        /// `0` indicates this slot is empty.-        pub id: u64,-        /// The elements correspond to the RDI, RSI, RDX, R8, and R9 registers-        /// in the synchronous calling convention.-        pub args: (u64, u64, u64, u64, u64)+    pub struct WithId<T> {

Please add to all public items added:

#[cfg_attr(feature = "rustc-dep-of-std", unstable(feature = "sgx_platform", issue = "56975"))]
mzohreva

comment created time in 3 days

Pull request review comment fortanix/rust-sgx

Implement async queues

+/target+**/*.rs.bk+Cargo.lock

These should be covered by the repo .gitignore

mzohreva

comment created time in 6 days

Pull request review comment fortanix/rust-sgx

Implement async queues

 mod interface;  use self::abi::dispatch; use self::interface::{Handler, OutputBuffer};-#[cfg(unix)]-use self::libc::*;-#[cfg(unix)]-use self::nix::sys::signal;-use crate::loader::{EnclavePanic, ErasedTcs};-use crate::tcs;-use crate::tcs::{CoResult, ThreadResult};-use std::thread::JoinHandle;  const EV_ABORT: u64 = 0b0000_0000_0000_1000; -type UsercallSendData = (ThreadResult<ErasedTcs>, RunningTcs, RefCell<[u8; 1024]>);+// Experiments show that the actual size of these queues is less important than+// the ratio between them. It appears that a much larger return queue performs+// much better when multiple enclave threads send usercalls.+const USERCALL_QUEUE_SIZE: usize = 16;+const RETURN_QUEUE_SIZE: usize = 1024;++enum UsercallSendData {+    Sync(ThreadResult<ErasedTcs>, RunningTcs, RefCell<[u8; 1024]>),+    Async(Usercall),+}++enum UsercallHandleData {

Add comment: “This is the same as UsercallSendData except that it can't be Sync(CoResult::Return(...), ...)”.

mzohreva

comment created time in 6 days

Pull request review comment fortanix/rust-sgx

Implement async queues

 pub mod async {     ///    expected to be written imminently.     /// 6. Read the data, then store `0` in the `id`.     /// 7. Store the new read offset.

Maybe this should be “Store the new read offset, retrieving the old offsets.”

mzohreva

comment created time in 6 days

PullRequestReviewEvent
PullRequestReviewEvent

PR closed fortanix/rust-sgx

Pvs/async queues alt

I have created the PR.

I know of two things that I am unsure of

  1. I don't know how exactly to test the implementation
  2. the fn async_queues on being called the second time should return something "equivalent to calling exit(true)" and I am unsure if the current code does that.
+1890 -206

1 comment

17 changed files

parthsane

pr closed time in 3 days

pull request comment fortanix/rust-sgx

Pvs/async queues alt

Closing in favor of #246

parthsane

comment created time in 3 days

PR closed fortanix/rust-sgx

Reviewers
Add Async queues

Added functionality for async queues to improve performance of usercalls. Added logic for sleep and wake-up of async queue polling threads. Added AsyncQueueWaiter for waking up Enclave TCS.

+1862 -179

1 comment

11 changed files

parthsane

pr closed time in 3 days

pull request comment fortanix/rust-sgx

Add Async queues

Closing in favor of #246

parthsane

comment created time in 3 days

pull request comment fortanix/rust-mbedtls

Add support for RSA OAEP encryption/decryption with custom labels

bors r=AdrianCX

Pagten

comment created time in 3 days

issue comment rust-lang/rust

Ballooning compile time with LVI mitigations

Ok we now have a Rust nightly that includes the patches from @scottconstable and Krzysztof. @jberci are you happy with the compile times now?

jberci

comment created time in 5 days

Pull request review comment fortanix/rust-sgx

Implement async queues

+use std::sync::atomic::{AtomicU64, Ordering};++#[cfg(target_env = "sgx")]+use std::os::fortanix_sgx::usercalls::alloc::UserSafeSized;++#[cfg(not(target_env = "sgx"))]+pub trait UserSafeSized: Copy + Sized {}++#[cfg(not(target_env = "sgx"))]+impl<T> UserSafeSized for T where T: Copy + Sized {}++#[repr(C)]+pub struct WithId<T: Copy> {

Copy (or UserSafeSized in the enclave)

pub(crate) struct FifoInner<T> {
    data: NonNull<[UnsafeCell<WithId<T>>]>,
    offsets: NonNull<AtomicUsize>,
    storage: Storage<T>,
}

mzohreva

comment created time in 6 days

PullRequestReviewEvent

Pull request review comment fortanix/rust-sgx

Implement async queues

+use std::sync::atomic::{AtomicU64, Ordering};++#[cfg(target_env = "sgx")]+use std::os::fortanix_sgx::usercalls::alloc::UserSafeSized;++#[cfg(not(target_env = "sgx"))]+pub trait UserSafeSized: Copy + Sized {}++#[cfg(not(target_env = "sgx"))]+impl<T> UserSafeSized for T where T: Copy + Sized {}++#[repr(C)]+pub struct WithId<T: Copy> {

WithId in the abi crate, I don't think you need the Identified trait.

mzohreva

comment created time in 6 days

PullRequestReviewEvent
PullRequestReviewEvent

Pull request review comment fortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::FifoDescriptor;+use std::cell::UnsafeCell;+use std::mem;+use std::ptr::NonNull;+use std::sync::atomic::{AtomicUsize, Ordering};+use std::sync::Arc;++pub fn bounded<T, S>(len: usize, s: S) -> (Sender<T, S>, Receiver<T, S>)+where+    T: Identified,+    S: Synchronizer,+{+    let arc = Arc::new(Fifo::new(len));+    let inner = FifoInner::from_arc(arc);+    let tx = Sender { inner: inner.clone(), synchronizer: s.clone() };+    let rx = Receiver { inner, synchronizer: s };+    (tx, rx)+}++pub fn bounded_async<T, S>(len: usize, s: S) -> (AsyncSender<T, S>, AsyncReceiver<T, S>)+where+    T: Identified,+    S: AsyncSynchronizer,+{+    let arc = Arc::new(Fifo::new(len));+    let inner = FifoInner::from_arc(arc);+    let tx = AsyncSender { inner: inner.clone(), synchronizer: s.clone() };+    let rx = AsyncReceiver { inner, synchronizer: s };+    (tx, rx)+}++struct Fifo<T> {+    data: Box<[T]>,+    offsets: Box<AtomicUsize>,+}++impl<T: Identified> Fifo<T> {+    fn new(len: usize) -> Self {+        assert!(+            len.is_power_of_two(),+            "Fifo len should be a power of two"+        );+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }+}++enum Storage<T> {+    Shared(Arc<Fifo<T>>),+    Static,+}++impl<T> Clone for Storage<T> {+    fn clone(&self) -> Self {+        match self {+            Storage::Shared(arc) => Storage::Shared(arc.clone()),+            Storage::Static => Storage::Static,+        }+    }+}++pub(crate) struct FifoInner<T> {+    data: NonNull<UnsafeCell<[T]>>,

With [UnsafeCell<T>] you can index/get the length without going through the unsafe wrapper. With UnsafeCell<[T]> you can (unsafely) change the pointer/length. Both of these points make me think [UnsafeCell<T>] is better.

mzohreva

comment created time in 6 days

Pull request review comment fortanix/rust-sgx

Support different API families for the Linux driver loader

 impl EnclaveLoad for InnerDevice {             flags: eadd.secinfo.flags,             ..Default::default()         };-        let adddata = ioctl::AddData {-            dstpage: mapping.base + eadd.offset,-            srcpage: &data,-            secinfo: &secinfo,-            chunks: chunks.0,-        };-        ioctl_unsafe!(Add, ioctl::add(mapping.device.fd.as_raw_fd(), &adddata))+        let dstpage = mapping.base + eadd.offset;+        match mapping.device.driver {+            Montgomery => {+                let adddata = ioctl::montgomery::AddData {+                    dstpage,+                    srcpage: &data,+                    secinfo: &secinfo,+                    chunks: chunks.0,+                };+                ioctl_unsafe!(Add, ioctl::montgomery::add(mapping.device.fd.as_raw_fd(), &adddata))+            },+            Augusta => {+                let flags = match chunks.0 {+                    0 => ioctl::augusta::SgxPageFlags::empty(),+                    0xffff => ioctl::augusta::SgxPageFlags::SGX_PAGE_MEASURE,

It's just “no bits set” and “all bits set” meaning all or no 256-byte chunks in a page, see also the Windows loader.

jethrogb

comment created time in 7 days

PullRequestReviewEvent
PullRequestReviewEvent

Pull request review comment fortanix/rust-sgx

Support different API families for the Linux driver loader

+#!/bin/bash -e+#+# Prior to running this script, make sure the system has the following+# configuration:+#+# * Linux: 4.15 generic kernel (e.g. not the Azure kernel)+# * Test dependencies: build-essential dkms docker.io+# * EDP dependencies: pkg-config libssl-dev protobuf-compiler+# * Rust: latest nightly installed with x86_64-fortanix-unknown-sgx target+# * No other SGX drivers or software installed (from any vendor)+#+# Note: you can't run this script multiple times because the FLC drivers make+# CPU configuration changes that the non-FLC drivers don't support.

Rebooting is easiest. You could manually restore the PUBKEYHASH MSRs but I didn't invest time in figuring out how.

jethrogb

comment created time in 7 days

Pull request review comment fortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::FifoDescriptor;+use std::cell::UnsafeCell;+use std::mem;+use std::ptr::NonNull;+use std::sync::atomic::{AtomicUsize, Ordering};+use std::sync::Arc;++pub fn bounded<T, S>(len: usize, s: S) -> (Sender<T, S>, Receiver<T, S>)+where+    T: Identified,+    S: Synchronizer,+{+    let arc = Arc::new(Fifo::new(len));+    let inner = FifoInner::from_arc(arc);+    let tx = Sender { inner: inner.clone(), synchronizer: s.clone() };+    let rx = Receiver { inner, synchronizer: s };+    (tx, rx)+}++pub fn bounded_async<T, S>(len: usize, s: S) -> (AsyncSender<T, S>, AsyncReceiver<T, S>)+where+    T: Identified,+    S: AsyncSynchronizer,+{+    let arc = Arc::new(Fifo::new(len));+    let inner = FifoInner::from_arc(arc);+    let tx = AsyncSender { inner: inner.clone(), synchronizer: s.clone() };+    let rx = AsyncReceiver { inner, synchronizer: s };+    (tx, rx)+}++struct Fifo<T> {+    data: Box<[T]>,+    offsets: Box<AtomicUsize>,+}++impl<T: Identified> Fifo<T> {+    fn new(len: usize) -> Self {+        assert!(+            len.is_power_of_two(),+            "Fifo len should be a power of two"+        );+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }+}++enum Storage<T> {+    Shared(Arc<Fifo<T>>),+    Static,+}++impl<T> Clone for Storage<T> {+    fn clone(&self) -> Self {+        match self {+            Storage::Shared(arc) => Storage::Shared(arc.clone()),+            Storage::Static => Storage::Static,+        }+    }+}++pub(crate) struct FifoInner<T> {+    data: NonNull<UnsafeCell<[T]>>,

Maybe an [UnsafeCell<T>] makes more sense. What do you think?

mzohreva

comment created time in 7 days

Pull request review comment fortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use crate::fifo::*;+use fortanix_sgx_abi::FifoDescriptor;++unsafe impl<T: Send, S: Send> Send for Sender<T, S> {}+unsafe impl<T: Send, S: Sync> Sync for Sender<T, S> {}++impl<T, S: Clone> Clone for Sender<T, S> {+    fn clone(&self) -> Self {+        Self {+            inner: self.inner.clone(),+            synchronizer: self.synchronizer.clone(),+        }+    }+}++impl<T: Identified, S: Synchronizer> Sender<T, S> {+    /// Create a `Sender` from a `FifoDescriptor` and `Synchronizer`.+    ///+    /// # Safety+    ///+    /// The caller must ensure the following:+    ///+    /// * The `data` and `len` fields in `FifoDescriptor` must adhere to all+    ///   safety requirements described in `std::slice::from_raw_parts_mut()`+    ///+    /// * The `offsets` field in `FifoDescriptor` must be non-null and point+    ///   to a valid memory location holding an `AtomicUsize`.+    ///+    /// * The synchronizer must somehow know how to correctly synchronize with+    ///   the other end of the channel.+    pub unsafe fn from_descriptor(d: FifoDescriptor<T>, synchronizer: S) -> Self {

Continuing the discussion from https://github.com/fortanix/rust-sgx/pull/246#discussion_r490111910 I still don't see the SGX safe user memory types being used here.

mzohreva

comment created time in 7 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+use std::sync::atomic::{AtomicU64, Ordering};++#[cfg(target_env = "sgx")]+use std::os::fortanix_sgx::usercalls::alloc::UserSafeSized;++#[cfg(not(target_env = "sgx"))]+pub trait UserSafeSized: Copy + Sized {}++#[cfg(not(target_env = "sgx"))]+impl<T> UserSafeSized for T where T: Copy + Sized {}++#[repr(C)]+pub struct WithId<T: Copy> {

Actually I meant for this to go in fortanix-sgx-abi. Then you can also get rid of most of this module.

mzohreva

comment created time in 7 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::FifoDescriptor;+use std::cell::UnsafeCell;+use std::mem;+use std::ptr::NonNull;+use std::sync::atomic::{AtomicUsize, Ordering};+use std::sync::Arc;++pub fn bounded<T, S>(len: usize, s: S) -> (Sender<T, S>, Receiver<T, S>)+where+    T: Identified,+    S: Synchronizer,+{+    let arc = Arc::new(Fifo::new(len));+    let inner = FifoInner::from_arc(arc);+    let tx = Sender { inner: inner.clone(), synchronizer: s.clone() };+    let rx = Receiver { inner, synchronizer: s };+    (tx, rx)+}++pub fn bounded_async<T, S>(len: usize, s: S) -> (AsyncSender<T, S>, AsyncReceiver<T, S>)+where+    T: Identified,+    S: AsyncSynchronizer,+{+    let arc = Arc::new(Fifo::new(len));+    let inner = FifoInner::from_arc(arc);+    let tx = AsyncSender { inner: inner.clone(), synchronizer: s.clone() };+    let rx = AsyncReceiver { inner, synchronizer: s };+    (tx, rx)+}++struct Fifo<T> {+    data: Box<[T]>,+    offsets: Box<AtomicUsize>,+}++impl<T: Identified> Fifo<T> {+    fn new(len: usize) -> Self {+        assert!(+            len.is_power_of_two(),+            "Fifo len should be a power of two"+        );+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }+}++enum Storage<T> {+    Shared(Arc<Fifo<T>>),+    Static,+}++impl<T> Clone for Storage<T> {+    fn clone(&self) -> Self {+        match self {+            Storage::Shared(arc) => Storage::Shared(arc.clone()),+            Storage::Static => Storage::Static,+        }+    }+}++pub(crate) struct FifoInner<T> {+    data: NonNull<UnsafeCell<[T]>>,+    offsets: NonNull<AtomicUsize>,+    storage: 
Storage<T>,+}++impl<T> Clone for FifoInner<T> {+    fn clone(&self) -> Self {+        Self {+            data: self.data.clone(),+            offsets: self.offsets.clone(),+            storage: self.storage.clone(),+        }+    }+}++impl<T: Identified> FifoInner<T> {+    pub(crate) unsafe fn from_descriptor(descriptor: FifoDescriptor<T>) -> Self {+        assert!(+            descriptor.len.is_power_of_two(),+            "Fifo len should be a power of two"+        );+        let data_slice = std::slice::from_raw_parts_mut(descriptor.data, descriptor.len);+        Self {+            data: NonNull::new_unchecked(data_slice as *mut [T] as *mut UnsafeCell<[T]>),+            offsets: NonNull::new_unchecked(descriptor.offsets as *mut AtomicUsize),+            storage: Storage::Static,+        }+    }++    fn from_arc(fifo: Arc<Fifo<T>>) -> Self {+        unsafe {+            Self {+                data: NonNull::new_unchecked(+                    fifo.data.as_ref() as *const [T] as *mut [T] as *mut UnsafeCell<[T]>+                ),+                offsets: NonNull::new_unchecked(+                    fifo.offsets.as_ref() as *const AtomicUsize as *mut AtomicUsize+                ),+                storage: Storage::Shared(fifo),+            }+        }+    }++    /// Consumes `self` and returns a FifoDescriptor. 
If `self` was created+    /// using `from_arc`, it leaks the internal `Arc` copy to ensure the+    /// resulting descriptor is valid for `'static` lifetime.+    pub(crate) fn into_descriptor(self) -> FifoDescriptor<T> {+        match self.storage {+            Storage::Shared(arc) => mem::forget(arc),+            Storage::Static => {}+        }+        let data_mut = unsafe { &mut *self.data.as_ref().get() };+        FifoDescriptor {+            data: data_mut.as_mut_ptr(),+            len: data_mut.len(),+            offsets: self.offsets.as_ptr(),+        }+    }++    fn slot(&self, index: usize) -> &mut T {+        let data_mut = unsafe { &mut *self.data.as_ref().get() };+        &mut data_mut[index]+    }++    fn data_len(&self) -> usize {+        let data = unsafe { &*self.data.as_ref().get() };+        data.len()+    }++    fn offsets(&self) -> &AtomicUsize {+        unsafe { self.offsets.as_ref() }+    }++    pub(crate) fn try_send_impl(&self, val: T) -> Result</*wake up reader:*/ bool, (TrySendError, T)> {+        let (new, was_empty) = loop {+            // 1. Load the current offsets.+            let current = Offsets::new(self.offsets().load(Ordering::SeqCst), self.data_len() as u32);+            let was_empty = current.is_empty();++            // 2. If the queue is full, wait, then go to step 1.+            if current.is_full() {+                return Err((TrySendError::QueueFull, val));+            }++            // 3. Add 1 to the write offset and do an atomic compare-and-swap (CAS)+            //    with the current offsets. If the CAS was not succesful, go to step 1.+            let new = current.increment_write_offset();+            let current = current.as_usize();+            let prev = self.offsets().compare_and_swap(current, new.as_usize(), Ordering::SeqCst);+            if prev == current {+                break (new, was_empty);+            }+        };++        // 4. 
Write the data, then the `id`.+        let slot = self.slot(new.write_offset());+        slot.copy_except_id(&val);+        slot.set_id(val.get_id_non_atomic());++        // 5. If the queue was empty in step 1, signal the reader to wake up.+        Ok(was_empty)+    }++    pub(crate) fn try_recv_impl(&self) -> Result<(T, /*wake up writer:*/ bool), TryRecvError> {+        // 1. Load the current offsets.+        let current = Offsets::new(self.offsets().load(Ordering::SeqCst), self.data_len() as u32);+        let was_full = current.is_full();++        // 2. If the queue is empty, wait, then go to step 1.+        if current.is_empty() {+            return Err(TryRecvError::QueueEmpty);+        }++        // 3. Add 1 to the read offset.+        let new = current.increment_read_offset();++        let slot = loop {+            // 4. Read the `id` at the new read offset.+            let slot = self.slot(new.read_offset());+            let id = slot.get_id();++            // 5. If `id` is `0`, go to step 4 (spin). Spinning is OK because data is+            //    expected to be written imminently.+            if id != 0 {+                break slot;+            }+        };++        // 6. Read the data, then store `0` in the `id`.+        let val = *slot;+        slot.set_id(0);++        // 7. Store the new read offset.+        let after = fetch_adjust(+            self.offsets(),+            new.read as isize - current.read as isize,+            Ordering::SeqCst,+        );++        // 8. If the queue was full in step 1, signal the writer to wake up.+        //    ... or became full during read+        let became_full = Offsets::new(after, self.data_len() as u32).is_full();+        Ok((val, was_full || became_full))

Ok I now think this makes sense. But I also think became_full is the only thing that should be checked. Please update the abi spec as well (“If the queue was full before step 7").

mzohreva

comment created time in 7 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+[package]+name = "ipc-queue"+version = "0.1.0"+authors = ["Fortanix, Inc."]+license = "MPL-2.0"+edition = "2018"+description = """+An implementation of FIFO queue described in fortanix-sgx-abi used for asynchronous usercalls.+"""+repository = "https://github.com/fortanix/rust-sgx"+documentation = "https://edp.fortanix.com/docs/api/ipc_queue/"+homepage = "https://edp.fortanix.com/"+keywords = ["sgx", "fifo", "queue", "ipc"]+categories = ["asynchronous"]++[dependencies]+fortanix-sgx-abi = { version = "0.3.0", path = "../fortanix-sgx-abi" }+static_assertions = "1.1.0"

This should be a dev dependency?

mzohreva

comment created time in 7 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::FifoDescriptor;+use std::cell::UnsafeCell;+use std::mem;+use std::ptr::NonNull;+use std::sync::atomic::{AtomicUsize, Ordering};+use std::sync::Arc;++pub fn bounded<T, S>(len: usize, s: S) -> (Sender<T, S>, Receiver<T, S>)+where+    T: Identified,+    S: Synchronizer,+{+    let arc = Arc::new(Fifo::new(len));+    let inner = FifoInner::from_arc(arc);+    let tx = Sender { inner: inner.clone(), synchronizer: s.clone() };+    let rx = Receiver { inner, synchronizer: s };+    (tx, rx)+}++pub fn bounded_async<T, S>(len: usize, s: S) -> (AsyncSender<T, S>, AsyncReceiver<T, S>)+where+    T: Identified,+    S: AsyncSynchronizer,+{+    let arc = Arc::new(Fifo::new(len));+    let inner = FifoInner::from_arc(arc);+    let tx = AsyncSender { inner: inner.clone(), synchronizer: s.clone() };+    let rx = AsyncReceiver { inner, synchronizer: s };+    (tx, rx)+}++struct Fifo<T> {+    data: Box<[T]>,+    offsets: Box<AtomicUsize>,+}++impl<T: Identified> Fifo<T> {+    fn new(len: usize) -> Self {+        assert!(+            len.is_power_of_two(),+            "Fifo len should be a power of two"+        );+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }+}++enum Storage<T> {+    Shared(Arc<Fifo<T>>),+    Static,+}++impl<T> Clone for Storage<T> {+    fn clone(&self) -> Self {+        match self {+            Storage::Shared(arc) => Storage::Shared(arc.clone()),+            Storage::Static => Storage::Static,+        }+    }+}++pub(crate) struct FifoInner<T> {+    data: NonNull<UnsafeCell<[T]>>,+    offsets: NonNull<AtomicUsize>,+    storage: 
Storage<T>,+}++impl<T> Clone for FifoInner<T> {+    fn clone(&self) -> Self {+        Self {+            data: self.data.clone(),+            offsets: self.offsets.clone(),+            storage: self.storage.clone(),+        }+    }+}++impl<T: Identified> FifoInner<T> {+    pub(crate) unsafe fn from_descriptor(descriptor: FifoDescriptor<T>) -> Self {+        assert!(+            descriptor.len.is_power_of_two(),+            "Fifo len should be a power of two"+        );+        let data_slice = std::slice::from_raw_parts_mut(descriptor.data, descriptor.len);+        Self {+            data: NonNull::new_unchecked(data_slice as *mut [T] as *mut UnsafeCell<[T]>),+            offsets: NonNull::new_unchecked(descriptor.offsets as *mut AtomicUsize),+            storage: Storage::Static,+        }+    }++    fn from_arc(fifo: Arc<Fifo<T>>) -> Self {+        unsafe {+            Self {+                data: NonNull::new_unchecked(+                    fifo.data.as_ref() as *const [T] as *mut [T] as *mut UnsafeCell<[T]>+                ),+                offsets: NonNull::new_unchecked(+                    fifo.offsets.as_ref() as *const AtomicUsize as *mut AtomicUsize+                ),+                storage: Storage::Shared(fifo),+            }+        }+    }++    /// Consumes `self` and returns a FifoDescriptor. If `self` was created+    /// using `from_arc`, it leaks the internal `Arc` copy to ensure the+    /// resulting descriptor is valid for `'static` lifetime.

This is not great. I think you should return some type that can produce a FifoDescriptor<T> but that the caller needs to hold on to for as long as it knows that the other end is using it (e.g. for as long as the enclave lives).

mzohreva

comment created time in 7 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use fortanix_sgx_abi::FifoDescriptor;+use std::future::Future;+use std::pin::Pin;+use std::sync::atomic::AtomicUsize;++mod fifo;+mod interface_sync;+mod interface_async;+#[cfg(test)]+mod test_support;++/// A FIFO queue implemented according to [fortanix_sgx_abi specifications].+///+/// **NOTE:** Sender and reciever types use FifoDescriptor internally which+/// does not hold a reference to the Fifo instance, therefore users of these+/// types must ensure that the Fifo instance lives at least as long as all+/// senders and receivers for that queue.+///+/// **NOTE:** sync and async sender/receiver types should not be used together.+/// i.e. either use sync senders/receivers or the async ones, but don't mix+/// sync and async. The interfaces are designed for use in SGX enclaves (sync)+/// and enclave runner (async).+///+/// [fortanix_sgx_abi specifications]: https://edp.fortanix.com/docs/api/fortanix_sgx_abi/async/struct.FifoDescriptor.html+pub struct Fifo<T> {+    data: Box<[T]>,+    offsets: Box<AtomicUsize>,+}++/// This is used as a bound on `T` in `Fifo<T>` and related types.+/// Types that implement this trait must have an `id: AtomicU64` field and use+/// `Ordering::SeqCst` in `get_id()` and `set_id()`.+pub trait WithAtomicId {+    /// Must set the `id` field to 0.+    fn empty() -> Self;+    fn get_id(&self) -> u64;+    fn set_id(&mut self, id: u64);+    /// Copy everything except the `id` field from another instance to self.+    fn copy_except_id(&mut self, from: &Self);+}++#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]+pub enum QueueEvent {+    NotEmpty,+    NotFull,+}++#[derive(Debug, PartialEq, Eq)]+pub enum TrySendError {+    QueueFull,+}++#[derive(Debug, PartialEq, Eq)]+pub enum TryRecvError {+    
QueueEmpty,+}

I see. But shouldn't we at least account for this future possibility in the API?

mzohreva

comment created time in 7 days

PullRequestReviewEvent

push eventfortanix/rust-sgx

Jethro Beekman

commit sha 00f518e0a7ed99362768c67da93c59a446e325ba

Add test for sgxs-loaders

view details

Jethro Beekman

commit sha c347afd47ef13e6a6529f280dbb66734d9cdb3d4

Support different API families for the Linux driver loader

view details

Jethro Beekman

commit sha 3d4708b2dc037edb47498e3edbf1d52b7af85e4e

Add crawler for finding all Linux SGX driver releases published by Intel

view details

Jethro Beekman

commit sha d135d88cd6b206b963fcdecdda2e2aca1f25a053

Add script to test loading enclaves with different Linux driver versions

view details

push time in 7 days

push eventfortanix/rust-sgx

jethrogb

commit sha e5fd0d5fbeb1eb360fd94b5182ff96cd9f6aa52e

Update libstd URL in README

view details

bors[bot]

commit sha 160b501831dd19e9e989c03c042d2d6d23f01e12

Merge #284 284: Update libstd URL in README r=Goirad a=jethrogb Co-authored-by: jethrogb <github@jbeekman.nl>

view details

Mohsen Zohrevandi

commit sha 94433262d37153bf39e93b7fc38107e5eb073f80

Update Cargo.lock format

view details

bors[bot]

commit sha bd700b79680151c24166e0fd4dc6583073796128

Merge #285 285: Update Cargo.lock format r=Goirad a=mzohreva This should not change anything besides the `Cargo.lock` format. Co-authored-by: Mohsen Zohrevandi <mohsen.zohrevandi@fortanix.com>

view details

Jethro Beekman

commit sha 38513c9cc4c6309bcea7c0459e997cec2fb28f08

Update sgxs-loaders to 2018 edition

view details

Jethro Beekman

commit sha d811adcacf688b97f6e154030188e18843036e72

Add test for sgxs-loaders

view details

Jethro Beekman

commit sha 1850a3ea60dee21329c50233ec54771528eaba42

Support different API families for the Linux driver loader

view details

Jethro Beekman

commit sha d412f0298efeba4a22874db7a07d4f6e7865caaa

Add crawler for finding all Linux SGX driver releases published by Intel

view details

Jethro Beekman

commit sha 4a164ede7b8219736ff96bf192e54f1edecd1727

Add script to test loading enclaves with different Linux driver versions

view details

push time in 7 days

Pull request review commentfortanix/rust-sgx

Implement async queues

 impl<'future, 'ioinput: 'future, 'tcs: 'ioinput> Usercalls<'future> for Handler<         return_queue: *mut FifoDescriptor<Return>,     ) -> std::pin::Pin<Box<dyn Future<Output = (Self, UsercallResult<Result>)> + 'future>> {         async move {-            unsafe {-                let ret = Ok((|| {-                    let usercall_queue = usercall_queue-                        .as_mut()-                        .ok_or(IoError::from(IoErrorKind::InvalidInput))?;-                    let return_queue = return_queue-                        .as_mut()-                        .ok_or(IoError::from(IoErrorKind::InvalidInput))?;-                    self.0.async_queues(usercall_queue, return_queue)-                })()-                .to_sgx_result());-                return (self, ret);+            let queues = (|| -> IoResult<_> {+                unsafe {+                    let uq = usercall_queue.as_mut().ok_or(IoError::from(IoErrorKind::InvalidInput))?;+                    let rq = return_queue.as_mut().ok_or(IoError::from(IoErrorKind::InvalidInput))?;+                    Ok((uq, rq))+                }+            })();+            let (uq, rq) = match queues {+                Err(e) => {+                    let ret: IoResult<()> = Err(e);+                    return (self, Ok(ret.to_sgx_result()));+                }+                Ok((uq, rq)) => (uq, rq),+            };+            match self.0.async_queues(uq, rq).await {+                Ok(()) => (self, Ok(Ok(()).to_sgx_result())),+                Err(e) => (self, Err(e)),
            unsafe {
                let ret = match (usercall_queue.as_mut(), return_queue.as_mut()) {
                    (Some(usercall_queue), Some(return_queue)) => {
                        self.0.async_queues(usercall_queue, return_queue).await.map(Ok)
                    },
                    _ => {
                        Ok(Err(IoErrorKind::InvalidInput.into()))
                    },
                };
                return (self, ret.map(|v| v.to_sgx_result()));
            }
mzohreva

comment created time in 11 days

Pull request review commentfortanix/rust-sgx

Implement async queues

 extern crate fortanix_sgx_abi; #[macro_use] extern crate lazy_static; extern crate futures;+extern crate async_queue;

This crate is 2018 edition

mzohreva

comment created time in 11 days

PullRequestReviewEvent
PullRequestReviewEvent

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use fortanix_sgx_abi::FifoDescriptor;+use std::future::Future;+use std::pin::Pin;+use std::sync::atomic::AtomicUsize;++mod fifo;+mod interface_sync;+mod interface_async;+#[cfg(test)]+mod test_support;++/// A FIFO queue implemented according to [fortanix_sgx_abi specifications].+///+/// **NOTE:** Sender and reciever types use FifoDescriptor internally which+/// does not hold a reference to the Fifo instance, therefore users of these+/// types must ensure that the Fifo instance lives at least as long as all+/// senders and receivers for that queue.+///+/// **NOTE:** sync and async sender/receiver types should not be used together.+/// i.e. either use sync senders/receivers or the async ones, but don't mix+/// sync and async. The interfaces are designed for use in SGX enclaves (sync)+/// and enclave runner (async).+///+/// [fortanix_sgx_abi specifications]: https://edp.fortanix.com/docs/api/fortanix_sgx_abi/async/struct.FifoDescriptor.html+pub struct Fifo<T> {+    data: Box<[T]>,+    offsets: Box<AtomicUsize>,+}++/// This is used as a bound on `T` in `Fifo<T>` and related types.+/// Types that implement this trait must have an `id: AtomicU64` field and use+/// `Ordering::SeqCst` in `get_id()` and `set_id()`.+pub trait WithAtomicId {+    /// Must set the `id` field to 0.+    fn empty() -> Self;+    fn get_id(&self) -> u64;+    fn set_id(&mut self, id: u64);+    /// Copy everything except the `id` field from another instance to self.+    fn copy_except_id(&mut self, from: &Self);+}++#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]+pub enum QueueEvent {+    NotEmpty,+    NotFull,+}++#[derive(Debug, PartialEq, Eq)]+pub enum TrySendError {+    QueueFull,+}++#[derive(Debug, PartialEq, Eq)]+pub enum TryRecvError {+    
QueueEmpty,+}++#[derive(Debug, PartialEq, Eq)]+pub enum SendError {+    Closed,+}++#[derive(Debug, PartialEq, Eq)]+pub enum RecvError {+    Closed,+}++#[derive(Debug, PartialEq, Eq)]+pub enum SynchronizationError {+    ChannelClosed,+}++pub trait Synchronizer {+    /// block execution until the specified event happens.+    fn wait(&self, event: QueueEvent) -> Result<(), SynchronizationError>;++    /// notify all waiters blocked on the specified event for the same Fifo.+    fn notify(&self, event: QueueEvent);+}++pub struct Sender<T, S> {+    descriptor: FifoDescriptor<T>,+    synchronizer: S,+}++pub struct Receiver<T, S> {+    descriptor: FifoDescriptor<T>,+    synchronizer: S,+}++pub trait AsyncSynchronizer {+    /// block execution until the specified event happens.+    fn wait(&self, event: QueueEvent) -> Pin<Box<dyn Future<Output = Result<(), SynchronizationError>> + '_>>;++    /// notify all waiters blocked on the specified event for the same Fifo.+    fn notify(&self, event: QueueEvent) -> Pin<Box<dyn Future<Output = ()> + '_>>;

Is this supposed to return a future?

mzohreva

comment created time in 11 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use fortanix_sgx_abi::FifoDescriptor;+use std::future::Future;+use std::pin::Pin;+use std::sync::atomic::AtomicUsize;++mod fifo;+mod interface_sync;+mod interface_async;+#[cfg(test)]+mod test_support;++/// A FIFO queue implemented according to [fortanix_sgx_abi specifications].+///+/// **NOTE:** Sender and reciever types use FifoDescriptor internally which+/// does not hold a reference to the Fifo instance, therefore users of these+/// types must ensure that the Fifo instance lives at least as long as all+/// senders and receivers for that queue.+///+/// **NOTE:** sync and async sender/receiver types should not be used together.+/// i.e. either use sync senders/receivers or the async ones, but don't mix+/// sync and async. The interfaces are designed for use in SGX enclaves (sync)+/// and enclave runner (async).

Also, it'd be good if we could somehow leverage the UserSafe wrappers when used inside the enclave.

mzohreva

comment created time in 11 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, Return, Usercall};+use lazy_static::lazy_static;+use std::collections::HashSet;+use std::mem;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Mutex;++impl<T: WithAtomicId> Fifo<T> {+    /// panics if len is not a power of two.+    pub fn new(len: usize) -> Self {+        assert!(len.is_power_of_two(), "Fifo len should be a power of two");+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }++    pub fn descriptor(&mut self) -> FifoDescriptor<T> {+        FifoDescriptor {+            data: self.data.as_mut().as_mut_ptr(),+            len: self.data.len(),+            offsets: self.offsets.as_ref() as _,+        }+    }++    pub fn sender<S: Synchronizer>(&mut self, synchronizer: S) -> Sender<T, S> {+        Sender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn receiver<S: Synchronizer>(&mut self, synchronizer: S) -> Receiver<T, S> {+        Receiver::new(self.descriptor(), synchronizer)+    }++    pub fn async_sender<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncSender<T, S> {+        AsyncSender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn async_receiver<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncReceiver<T, S> {+        AsyncReceiver::new(self.descriptor(), synchronizer)+    }

These APIs seem not that useful to me. If you want to try to create a safer API, consider adding lifetimes to Sender/Receiver (e.g. using PhantomData). Also, you should be able to create a channel (Sender, Receiver) in a single call taking &mut self. You should then be able to convert one of them (taken by value) into a FifoDescriptor.

mzohreva

comment created time in 11 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, Return, Usercall};+use lazy_static::lazy_static;+use std::collections::HashSet;+use std::mem;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Mutex;++impl<T: WithAtomicId> Fifo<T> {+    /// panics if len is not a power of two.+    pub fn new(len: usize) -> Self {+        assert!(len.is_power_of_two(), "Fifo len should be a power of two");+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }++    pub fn descriptor(&mut self) -> FifoDescriptor<T> {+        FifoDescriptor {+            data: self.data.as_mut().as_mut_ptr(),+            len: self.data.len(),+            offsets: self.offsets.as_ref() as _,+        }+    }++    pub fn sender<S: Synchronizer>(&mut self, synchronizer: S) -> Sender<T, S> {+        Sender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn receiver<S: Synchronizer>(&mut self, synchronizer: S) -> Receiver<T, S> {+        Receiver::new(self.descriptor(), synchronizer)+    }++    pub fn async_sender<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncSender<T, S> {+        AsyncSender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn async_receiver<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncReceiver<T, S> {+        AsyncReceiver::new(self.descriptor(), synchronizer)+    }+}++pub(crate) fn try_send_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>, val: &T) -> 
Result</*wake up reader:*/ bool, TrySendError> {+    let (new, was_empty) = loop {+        // 1. Load the current offsets.+        let current = unsafe {+            let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+            Offsets::new(offsets, descriptor.len as u32)+        };+        let was_empty = current.is_empty();++        // 2. If the queue is full, wait, then go to step 1.+        if current.is_full() {+            return Err(TrySendError::QueueFull);+        }++        // 3. Add 1 to the write offset and do an atomic compare-and-swap (CAS)+        //    with the current offsets. If the CAS was not succesful, go to step 1.+        let new = current.increment_write_offset();+        let current = current.as_usize();+        let prev = unsafe {+            (*descriptor.offsets).compare_and_swap(current, new.as_usize(), Ordering::SeqCst)+        };+        if prev == current {+            break (new, was_empty);+        }+    };++    // 4. Write the data, then the `id`.+    let slot = unsafe { &mut *descriptor.data.add(new.write_offset()) };+    slot.copy_except_id(&val);+    slot.set_id(val.get_id());++    // 5. If the queue was empty in step 1, signal the reader to wake up.+    Ok(was_empty)+}++pub(crate) fn try_recv_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>) -> Result<(T, /*wake up writer:*/ bool), TryRecvError> {+    // 1. Load the current offsets.+    let current = unsafe {+        let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+        Offsets::new(offsets, descriptor.len as u32)+    };+    let was_full = current.is_full();++    // 2. If the queue is empty, wait, then go to step 1.+    if current.is_empty() {+        return Err(TryRecvError::QueueEmpty);+    }++    // 3. Add 1 to the read offset.+    let new = current.increment_read_offset();++    let slot = loop {+        // 4. 
Read the `id` at the new read offset.+        let slot = unsafe { &mut *descriptor.data.add(new.read_offset()) };+        let id = slot.get_id();++        // 5. If `id` is `0`, go to step 4 (spin). Spinning is OK because data is+        //    expected to be written imminently.+        if id != 0 {+            break slot;+        }+    };++    // 6. Read the data, then store `0` in the `id`.+    let mut val = T::empty();+    val.copy_except_id(slot);+    val.set_id(slot.get_id());+    slot.set_id(0);++    // 7. Store the new read offset.+    let after = unsafe {+        fetch_adjust(+            &*descriptor.offsets,+            new.read as isize - current.read as isize,+            Ordering::SeqCst,+        )+    };++    // 8. If the queue was full in step 1, signal the writer to wake up.+    //    ... or became full during read+    let became_full = Offsets::new(after, descriptor.len as u32).is_full();+    Ok((val, was_full || became_full))+}++lazy_static! {+    pub(crate) static ref RECEIVER_TRACKER: ReceiverTracker = ReceiverTracker::new();+}++pub(crate) struct ReceiverTracker(Mutex<HashSet<usize>>);++impl ReceiverTracker {+    fn new() -> Self {+        Self(Mutex::new(HashSet::new()))+    }++    pub(crate) fn new_receiver(&self, data_ptr: usize) {+        let already_exists = {+            let mut receivers = self.0.lock().unwrap();+            !receivers.insert(data_ptr)+        };+        if already_exists {+            panic!("Multiple receivers for the same Fifo is not allowed.");+        }+    }++    pub(crate) fn drop_receiver(&self, data_ptr: usize) {+        let mut receivers = self.0.lock().unwrap();+        receivers.remove(&data_ptr);

Assert you removed something

mzohreva

comment created time in 11 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, Return, Usercall};+use lazy_static::lazy_static;+use std::collections::HashSet;+use std::mem;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Mutex;++impl<T: WithAtomicId> Fifo<T> {+    /// panics if len is not a power of two.+    pub fn new(len: usize) -> Self {+        assert!(len.is_power_of_two(), "Fifo len should be a power of two");+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }++    pub fn descriptor(&mut self) -> FifoDescriptor<T> {+        FifoDescriptor {+            data: self.data.as_mut().as_mut_ptr(),+            len: self.data.len(),+            offsets: self.offsets.as_ref() as _,+        }+    }++    pub fn sender<S: Synchronizer>(&mut self, synchronizer: S) -> Sender<T, S> {+        Sender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn receiver<S: Synchronizer>(&mut self, synchronizer: S) -> Receiver<T, S> {+        Receiver::new(self.descriptor(), synchronizer)+    }++    pub fn async_sender<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncSender<T, S> {+        AsyncSender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn async_receiver<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncReceiver<T, S> {+        AsyncReceiver::new(self.descriptor(), synchronizer)+    }+}++pub(crate) fn try_send_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>, val: &T) -> 
Result</*wake up reader:*/ bool, TrySendError> {+    let (new, was_empty) = loop {+        // 1. Load the current offsets.+        let current = unsafe {+            let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+            Offsets::new(offsets, descriptor.len as u32)+        };+        let was_empty = current.is_empty();++        // 2. If the queue is full, wait, then go to step 1.+        if current.is_full() {+            return Err(TrySendError::QueueFull);+        }++        // 3. Add 1 to the write offset and do an atomic compare-and-swap (CAS)+        //    with the current offsets. If the CAS was not succesful, go to step 1.+        let new = current.increment_write_offset();+        let current = current.as_usize();+        let prev = unsafe {+            (*descriptor.offsets).compare_and_swap(current, new.as_usize(), Ordering::SeqCst)+        };+        if prev == current {+            break (new, was_empty);+        }+    };++    // 4. Write the data, then the `id`.+    let slot = unsafe { &mut *descriptor.data.add(new.write_offset()) };+    slot.copy_except_id(&val);+    slot.set_id(val.get_id());++    // 5. If the queue was empty in step 1, signal the reader to wake up.+    Ok(was_empty)+}++pub(crate) fn try_recv_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>) -> Result<(T, /*wake up writer:*/ bool), TryRecvError> {+    // 1. Load the current offsets.+    let current = unsafe {+        let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+        Offsets::new(offsets, descriptor.len as u32)+    };+    let was_full = current.is_full();++    // 2. If the queue is empty, wait, then go to step 1.+    if current.is_empty() {+        return Err(TryRecvError::QueueEmpty);+    }++    // 3. Add 1 to the read offset.+    let new = current.increment_read_offset();++    let slot = loop {+        // 4. 
Read the `id` at the new read offset.+        let slot = unsafe { &mut *descriptor.data.add(new.read_offset()) };+        let id = slot.get_id();++        // 5. If `id` is `0`, go to step 4 (spin). Spinning is OK because data is+        //    expected to be written imminently.+        if id != 0 {+            break slot;+        }+    };++    // 6. Read the data, then store `0` in the `id`.+    let mut val = T::empty();+    val.copy_except_id(slot);+    val.set_id(slot.get_id());+    slot.set_id(0);++    // 7. Store the new read offset.+    let after = unsafe {+        fetch_adjust(+            &*descriptor.offsets,+            new.read as isize - current.read as isize,+            Ordering::SeqCst,+        )

Alternatively, you can cast to a smaller atomic and just do a store (which avoids an atomic read-modify-write here).

mzohreva

comment created time in 11 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, Return, Usercall};+use lazy_static::lazy_static;+use std::collections::HashSet;+use std::mem;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Mutex;++impl<T: WithAtomicId> Fifo<T> {+    /// panics if len is not a power of two.+    pub fn new(len: usize) -> Self {+        assert!(len.is_power_of_two(), "Fifo len should be a power of two");+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }++    pub fn descriptor(&mut self) -> FifoDescriptor<T> {+        FifoDescriptor {+            data: self.data.as_mut().as_mut_ptr(),+            len: self.data.len(),+            offsets: self.offsets.as_ref() as _,+        }+    }++    pub fn sender<S: Synchronizer>(&mut self, synchronizer: S) -> Sender<T, S> {+        Sender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn receiver<S: Synchronizer>(&mut self, synchronizer: S) -> Receiver<T, S> {+        Receiver::new(self.descriptor(), synchronizer)+    }++    pub fn async_sender<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncSender<T, S> {+        AsyncSender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn async_receiver<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncReceiver<T, S> {+        AsyncReceiver::new(self.descriptor(), synchronizer)+    }+}++pub(crate) fn try_send_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>, val: &T) -> 
Result</*wake up reader:*/ bool, TrySendError> {+    let (new, was_empty) = loop {+        // 1. Load the current offsets.+        let current = unsafe {+            let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+            Offsets::new(offsets, descriptor.len as u32)+        };+        let was_empty = current.is_empty();++        // 2. If the queue is full, wait, then go to step 1.+        if current.is_full() {+            return Err(TrySendError::QueueFull);+        }++        // 3. Add 1 to the write offset and do an atomic compare-and-swap (CAS)+        //    with the current offsets. If the CAS was not succesful, go to step 1.+        let new = current.increment_write_offset();+        let current = current.as_usize();+        let prev = unsafe {+            (*descriptor.offsets).compare_and_swap(current, new.as_usize(), Ordering::SeqCst)+        };+        if prev == current {+            break (new, was_empty);+        }+    };++    // 4. Write the data, then the `id`.+    let slot = unsafe { &mut *descriptor.data.add(new.write_offset()) };+    slot.copy_except_id(&val);+    slot.set_id(val.get_id());++    // 5. If the queue was empty in step 1, signal the reader to wake up.+    Ok(was_empty)+}++pub(crate) fn try_recv_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>) -> Result<(T, /*wake up writer:*/ bool), TryRecvError> {+    // 1. Load the current offsets.+    let current = unsafe {+        let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+        Offsets::new(offsets, descriptor.len as u32)+    };+    let was_full = current.is_full();++    // 2. If the queue is empty, wait, then go to step 1.+    if current.is_empty() {+        return Err(TryRecvError::QueueEmpty);+    }++    // 3. Add 1 to the read offset.+    let new = current.increment_read_offset();++    let slot = loop {+        // 4. 
Read the `id` at the new read offset.+        let slot = unsafe { &mut *descriptor.data.add(new.read_offset()) };+        let id = slot.get_id();++        // 5. If `id` is `0`, go to step 4 (spin). Spinning is OK because data is+        //    expected to be written imminently.+        if id != 0 {+            break slot;+        }+    };++    // 6. Read the data, then store `0` in the `id`.+    let mut val = T::empty();+    val.copy_except_id(slot);+    val.set_id(slot.get_id());+    slot.set_id(0);++    // 7. Store the new read offset.+    let after = unsafe {+        fetch_adjust(+            &*descriptor.offsets,+            new.read as isize - current.read as isize,+            Ordering::SeqCst,+        )+    };++    // 8. If the queue was full in step 1, signal the writer to wake up.+    //    ... or became full during read+    let became_full = Offsets::new(after, descriptor.len as u32).is_full();+    Ok((val, was_full || became_full))+}++lazy_static! {+    pub(crate) static ref RECEIVER_TRACKER: ReceiverTracker = ReceiverTracker::new();

Is this useful? There's a 1000 things that could go wrong when setting up the queue, why check for this? On the other hand I guess this is pretty cheap.

mzohreva

comment created time in 11 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use fortanix_sgx_abi::FifoDescriptor;+use std::future::Future;+use std::pin::Pin;+use std::sync::atomic::AtomicUsize;++mod fifo;+mod interface_sync;+mod interface_async;+#[cfg(test)]+mod test_support;++/// A FIFO queue implemented according to [fortanix_sgx_abi specifications].+///+/// **NOTE:** Sender and reciever types use FifoDescriptor internally which+/// does not hold a reference to the Fifo instance, therefore users of these+/// types must ensure that the Fifo instance lives at least as long as all+/// senders and receivers for that queue.+///+/// **NOTE:** sync and async sender/receiver types should not be used together.+/// i.e. either use sync senders/receivers or the async ones, but don't mix+/// sync and async. The interfaces are designed for use in SGX enclaves (sync)+/// and enclave runner (async).+///+/// [fortanix_sgx_abi specifications]: https://edp.fortanix.com/docs/api/fortanix_sgx_abi/async/struct.FifoDescriptor.html+pub struct Fifo<T> {+    data: Box<[T]>,+    offsets: Box<AtomicUsize>,+}++/// This is used as a bound on `T` in `Fifo<T>` and related types.+/// Types that implement this trait must have an `id: AtomicU64` field and use+/// `Ordering::SeqCst` in `get_id()` and `set_id()`.+pub trait WithAtomicId {+    /// Must set the `id` field to 0.+    fn empty() -> Self;+    fn get_id(&self) -> u64;+    fn set_id(&mut self, id: u64);+    /// Copy everything except the `id` field from another instance to self.+    fn copy_except_id(&mut self, from: &Self);+}++#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]+pub enum QueueEvent {+    NotEmpty,+    NotFull,+}++#[derive(Debug, PartialEq, Eq)]+pub enum TrySendError {+    QueueFull,+}++#[derive(Debug, PartialEq, Eq)]+pub enum TryRecvError {+    
QueueEmpty,+}++#[derive(Debug, PartialEq, Eq)]+pub enum SendError {+    Closed,+}++#[derive(Debug, PartialEq, Eq)]+pub enum RecvError {+    Closed,+}++#[derive(Debug, PartialEq, Eq)]+pub enum SynchronizationError {+    ChannelClosed,+}++pub trait Synchronizer {+    /// block execution until the specified event happens.+    fn wait(&self, event: QueueEvent) -> Result<(), SynchronizationError>;++    /// notify all waiters blocked on the specified event for the same Fifo.+    fn notify(&self, event: QueueEvent);+}++pub struct Sender<T, S> {+    descriptor: FifoDescriptor<T>,

For all these types, I think we should convert FifoDescriptor on construction to more useful types &AtomicOffsets and &UnsafeCell<[T]>

mzohreva

comment created time in 11 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, Return, Usercall};+use lazy_static::lazy_static;+use std::collections::HashSet;+use std::mem;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Mutex;++impl<T: WithAtomicId> Fifo<T> {+    /// panics if len is not a power of two.+    pub fn new(len: usize) -> Self {+        assert!(len.is_power_of_two(), "Fifo len should be a power of two");+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }++    pub fn descriptor(&mut self) -> FifoDescriptor<T> {+        FifoDescriptor {+            data: self.data.as_mut().as_mut_ptr(),+            len: self.data.len(),+            offsets: self.offsets.as_ref() as _,+        }+    }++    pub fn sender<S: Synchronizer>(&mut self, synchronizer: S) -> Sender<T, S> {+        Sender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn receiver<S: Synchronizer>(&mut self, synchronizer: S) -> Receiver<T, S> {+        Receiver::new(self.descriptor(), synchronizer)+    }++    pub fn async_sender<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncSender<T, S> {+        AsyncSender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn async_receiver<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncReceiver<T, S> {+        AsyncReceiver::new(self.descriptor(), synchronizer)+    }+}++pub(crate) fn try_send_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>, val: &T) -> 
Result</*wake up reader:*/ bool, TrySendError> {+    let (new, was_empty) = loop {+        // 1. Load the current offsets.+        let current = unsafe {+            let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+            Offsets::new(offsets, descriptor.len as u32)+        };+        let was_empty = current.is_empty();++        // 2. If the queue is full, wait, then go to step 1.+        if current.is_full() {+            return Err(TrySendError::QueueFull);+        }++        // 3. Add 1 to the write offset and do an atomic compare-and-swap (CAS)+        //    with the current offsets. If the CAS was not succesful, go to step 1.+        let new = current.increment_write_offset();+        let current = current.as_usize();+        let prev = unsafe {+            (*descriptor.offsets).compare_and_swap(current, new.as_usize(), Ordering::SeqCst)+        };+        if prev == current {+            break (new, was_empty);+        }+    };++    // 4. Write the data, then the `id`.+    let slot = unsafe { &mut *descriptor.data.add(new.write_offset()) };+    slot.copy_except_id(&val);+    slot.set_id(val.get_id());++    // 5. If the queue was empty in step 1, signal the reader to wake up.+    Ok(was_empty)+}++pub(crate) fn try_recv_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>) -> Result<(T, /*wake up writer:*/ bool), TryRecvError> {+    // 1. Load the current offsets.+    let current = unsafe {+        let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+        Offsets::new(offsets, descriptor.len as u32)+    };+    let was_full = current.is_full();++    // 2. If the queue is empty, wait, then go to step 1.+    if current.is_empty() {+        return Err(TryRecvError::QueueEmpty);+    }++    // 3. Add 1 to the read offset.+    let new = current.increment_read_offset();++    let slot = loop {+        // 4. 
Read the `id` at the new read offset.+        let slot = unsafe { &mut *descriptor.data.add(new.read_offset()) };+        let id = slot.get_id();++        // 5. If `id` is `0`, go to step 4 (spin). Spinning is OK because data is+        //    expected to be written imminently.+        if id != 0 {+            break slot;+        }+    };++    // 6. Read the data, then store `0` in the `id`.+    let mut val = T::empty();+    val.copy_except_id(slot);+    val.set_id(slot.get_id());

You already read the id in step 4

mzohreva

comment created time in 11 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, Return, Usercall};+use lazy_static::lazy_static;+use std::collections::HashSet;+use std::mem;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Mutex;++impl<T: WithAtomicId> Fifo<T> {+    /// panics if len is not a power of two.+    pub fn new(len: usize) -> Self {+        assert!(len.is_power_of_two(), "Fifo len should be a power of two");+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }++    pub fn descriptor(&mut self) -> FifoDescriptor<T> {+        FifoDescriptor {+            data: self.data.as_mut().as_mut_ptr(),+            len: self.data.len(),+            offsets: self.offsets.as_ref() as _,+        }+    }++    pub fn sender<S: Synchronizer>(&mut self, synchronizer: S) -> Sender<T, S> {+        Sender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn receiver<S: Synchronizer>(&mut self, synchronizer: S) -> Receiver<T, S> {+        Receiver::new(self.descriptor(), synchronizer)+    }++    pub fn async_sender<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncSender<T, S> {+        AsyncSender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn async_receiver<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncReceiver<T, S> {+        AsyncReceiver::new(self.descriptor(), synchronizer)+    }+}++pub(crate) fn try_send_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>, val: &T) -> 
Result</*wake up reader:*/ bool, TrySendError> {+    let (new, was_empty) = loop {+        // 1. Load the current offsets.+        let current = unsafe {+            let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+            Offsets::new(offsets, descriptor.len as u32)+        };+        let was_empty = current.is_empty();++        // 2. If the queue is full, wait, then go to step 1.+        if current.is_full() {+            return Err(TrySendError::QueueFull);+        }++        // 3. Add 1 to the write offset and do an atomic compare-and-swap (CAS)+        //    with the current offsets. If the CAS was not succesful, go to step 1.+        let new = current.increment_write_offset();+        let current = current.as_usize();+        let prev = unsafe {+            (*descriptor.offsets).compare_and_swap(current, new.as_usize(), Ordering::SeqCst)+        };+        if prev == current {+            break (new, was_empty);+        }+    };++    // 4. Write the data, then the `id`.+    let slot = unsafe { &mut *descriptor.data.add(new.write_offset()) };+    slot.copy_except_id(&val);+    slot.set_id(val.get_id());++    // 5. If the queue was empty in step 1, signal the reader to wake up.+    Ok(was_empty)+}++pub(crate) fn try_recv_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>) -> Result<(T, /*wake up writer:*/ bool), TryRecvError> {+    // 1. Load the current offsets.+    let current = unsafe {+        let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+        Offsets::new(offsets, descriptor.len as u32)+    };+    let was_full = current.is_full();++    // 2. If the queue is empty, wait, then go to step 1.+    if current.is_empty() {+        return Err(TryRecvError::QueueEmpty);+    }++    // 3. Add 1 to the read offset.+    let new = current.increment_read_offset();++    let slot = loop {+        // 4. 
Read the `id` at the new read offset.+        let slot = unsafe { &mut *descriptor.data.add(new.read_offset()) };+        let id = slot.get_id();++        // 5. If `id` is `0`, go to step 4 (spin). Spinning is OK because data is+        //    expected to be written imminently.+        if id != 0 {+            break slot;+        }+    };++    // 6. Read the data, then store `0` in the `id`.+    let mut val = T::empty();+    val.copy_except_id(slot);+    val.set_id(slot.get_id());+    slot.set_id(0);++    // 7. Store the new read offset.+    let after = unsafe {+        fetch_adjust(+            &*descriptor.offsets,+            new.read as isize - current.read as isize,+            Ordering::SeqCst,+        )+    };++    // 8. If the queue was full in step 1, signal the writer to wake up.+    //    ... or became full during read+    let became_full = Offsets::new(after, descriptor.len as u32).is_full();

This deviation from the spec doesn't make sense. If it's full now, you shouldn't be waking up any writers.

mzohreva

comment created time in 11 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, Return, Usercall};+use lazy_static::lazy_static;+use std::collections::HashSet;+use std::mem;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Mutex;++impl<T: WithAtomicId> Fifo<T> {+    /// panics if len is not a power of two.+    pub fn new(len: usize) -> Self {+        assert!(len.is_power_of_two(), "Fifo len should be a power of two");+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }++    pub fn descriptor(&mut self) -> FifoDescriptor<T> {+        FifoDescriptor {+            data: self.data.as_mut().as_mut_ptr(),+            len: self.data.len(),+            offsets: self.offsets.as_ref() as _,+        }+    }++    pub fn sender<S: Synchronizer>(&mut self, synchronizer: S) -> Sender<T, S> {+        Sender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn receiver<S: Synchronizer>(&mut self, synchronizer: S) -> Receiver<T, S> {+        Receiver::new(self.descriptor(), synchronizer)+    }++    pub fn async_sender<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncSender<T, S> {+        AsyncSender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn async_receiver<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncReceiver<T, S> {+        AsyncReceiver::new(self.descriptor(), synchronizer)+    }+}++pub(crate) fn try_send_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>, val: &T) -> 
Result</*wake up reader:*/ bool, TrySendError> {+    let (new, was_empty) = loop {+        // 1. Load the current offsets.+        let current = unsafe {+            let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+            Offsets::new(offsets, descriptor.len as u32)+        };+        let was_empty = current.is_empty();++        // 2. If the queue is full, wait, then go to step 1.+        if current.is_full() {+            return Err(TrySendError::QueueFull);+        }++        // 3. Add 1 to the write offset and do an atomic compare-and-swap (CAS)+        //    with the current offsets. If the CAS was not succesful, go to step 1.+        let new = current.increment_write_offset();+        let current = current.as_usize();+        let prev = unsafe {+            (*descriptor.offsets).compare_and_swap(current, new.as_usize(), Ordering::SeqCst)+        };+        if prev == current {+            break (new, was_empty);+        }+    };++    // 4. Write the data, then the `id`.+    let slot = unsafe { &mut *descriptor.data.add(new.write_offset()) };+    slot.copy_except_id(&val);+    slot.set_id(val.get_id());++    // 5. If the queue was empty in step 1, signal the reader to wake up.+    Ok(was_empty)+}++pub(crate) fn try_recv_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>) -> Result<(T, /*wake up writer:*/ bool), TryRecvError> {+    // 1. Load the current offsets.+    let current = unsafe {+        let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+        Offsets::new(offsets, descriptor.len as u32)+    };+    let was_full = current.is_full();++    // 2. If the queue is empty, wait, then go to step 1.+    if current.is_empty() {+        return Err(TryRecvError::QueueEmpty);+    }++    // 3. Add 1 to the read offset.+    let new = current.increment_read_offset();++    let slot = loop {+        // 4. 
Read the `id` at the new read offset.+        let slot = unsafe { &mut *descriptor.data.add(new.read_offset()) };+        let id = slot.get_id();++        // 5. If `id` is `0`, go to step 4 (spin). Spinning is OK because data is+        //    expected to be written imminently.+        if id != 0 {+            break slot;+        }+    };++    // 6. Read the data, then store `0` in the `id`.+    let mut val = T::empty();+    val.copy_except_id(slot);+    val.set_id(slot.get_id());+    slot.set_id(0);++    // 7. Store the new read offset.+    let after = unsafe {+        fetch_adjust(+            &*descriptor.offsets,+            new.read as isize - current.read as isize,+            Ordering::SeqCst,+        )+    };++    // 8. If the queue was full in step 1, signal the writer to wake up.+    //    ... or became full during read+    let became_full = Offsets::new(after, descriptor.len as u32).is_full();+    Ok((val, was_full || became_full))+}++lazy_static! {+    pub(crate) static ref RECEIVER_TRACKER: ReceiverTracker = ReceiverTracker::new();+}++pub(crate) struct ReceiverTracker(Mutex<HashSet<usize>>);

HashSet<*const ()> makes more sense I think.

mzohreva

comment created time in 11 days

PullRequestReviewEvent
PullRequestReviewEvent

push eventjethrogb/rust

Prabakaran Kumaresshan

commit sha 7ea4c28af238883884deddbe5c411d01355fd12b

add i32::MAX link

view details

khyperia

commit sha 1663bfba02b7c1545a3c241b6b08cc9d9b97c475

Fix matching on field.abi instead of self.layout.abi

view details

Joshua Nelson

commit sha a1c71a17090cd5ef732d852ee878114375ff9254

rustdoc,metadata: Debugging

view details

Joshua Nelson

commit sha 9131d23cc0fb5461050bc19e40a3858b61487069

resolve: Don't speculatively load crates if this is a speculative resolution This avoids a rare rustdoc bug where loading `core` twice caused a 'cannot find a built-in macro' error: 1. `x.py build --stage 1` builds the standard library and creates a sysroot 2. `cargo doc` does something like `cargo check` to create `rmeta`s for all the crates (unrelated to what was built above) 3. the `cargo check`-like `libcore-*.rmeta` is loaded as a transitive dependency *and claims ownership* of builtin macros 4. `rustdoc` later tries to resolve some path in a doc link 5. suggestion logic fires and loads "extern prelude" crates by name 6. the sysroot `libcore-*.rlib` is loaded and *fails to claim ownership* of builtin macros This fixes step 5. by not running suggestion logic if this is a speculative resolution. Additionally, it marks `resolve_ast_path` as a speculative resolution.

view details

DPC

commit sha b3d7b7bdcbea782d15a34861acfb8c4bdb1b96c1

update fixmes

view details

bors

commit sha db534b3ac286cf45688c3bbae6aa6e77439e52d2

Auto merge of #75176 - jyn514:impl-link, r=GuillaumeGomez,petrochenkov Fix intra-doc links for cross-crate re-exports of default trait methods The original fix for this was very simple: https://github.com/rust-lang/rust/pull/58972 ignored `extern_traits` because before https://github.com/rust-lang/rust/issues/65983 was fixed, they would always fail to resolve, giving spurious warnings. So the first commit just undoes that change, so extern traits are now seen by the `collect_intra_doc_links` pass. There are also some minor changes in `librustdoc/fold.rs` to avoid borrowing the `extern_traits` RefCell more than once at a time. However, that brought up a much more thorny problem. `rustc_resolve` started giving 'error: cannot find a built-in macro with name `cfg`' when documenting `libproc_macro` (I still haven't been able to reproduce on anything smaller than the full standard library). The chain of events looked like this (thanks @eddyb for the help debugging!): 0. `x.py build --stage 1` builds the standard library and creates a sysroot 1. `cargo doc` does something like `cargo check` to create `rmeta`s for all the crates (unrelated to what was built above) 2. the `cargo check`-like `libcore-*.rmeta` is loaded as a transitive dependency *and claims ownership* of builtin macros 3. `rustdoc` later tries to resolve some path in a doc link 4. suggestion logic fires and loads "extern prelude" crates by name 5. the sysroot `libcore-*.rlib` is loaded and *fails to claim ownership* of builtin macros `rustc_resolve` gives the error after step 5. However, `rustdoc` doesn't need suggestions at all - `resolve_str_path_error` completely discards the `ResolutionError`! The fix implemented in this PR is to skip the suggestion logic for `resolve_ast_path`: pass `record_used: false` and skip `lookup_import_candidates` when `record_used` isn't set. 
It's possible that if/when https://github.com/rust-lang/rust/issues/74207 is implemented this will need a more in-depth fix which returns a `ResolutionError` from `compile_macro`, to allow rustdoc to reuse the suggestions from rustc_resolve. However, that's a much larger change and there's no need for it yet, so I haven't implemented it here. Fixes https://github.com/rust-lang/rust/issues/73829. r? @GuillaumeGomez

view details

Ivan Tham

commit sha 1d017eb6a4602d18ccae9e8dfcc209fb8382982e

Fix env doc vars_os broken link

view details

Prabakaran Kumaresshan

commit sha 523fea4d1405c4e2d4a96d126f4d990d342de6cc

revert Some(Item) link

view details

Joshua Nelson

commit sha e885f00f24aab657b3a9835818fc96e638e7fb21

Comment out test for generated docs until rustdoc changes its behavior around documenting re-exports

view details

mark

commit sha 9e5f7d5631b8f4009ac1c693e585d4b7108d4275

mv compiler to compiler/

view details

bors

commit sha 85fbf49ce0e2274d0acf798f6e703747674feec3

Auto merge of #74862 - mark-i-m:mv-compiler, r=petrochenkov Move almost all compiler crates to compiler/ This PR implements https://github.com/rust-lang/compiler-team/issues/336 and moves all `rustc_*` crates from `src` to the new `compiler` directory. `librustc_foo` directories are renamed to `rustc_foo`. `src` directories are introduced inside `rustc_*` directories to mirror the scheme already use for `library` crates.

view details

Andy Russell

commit sha e0822ecdbc43a6128136661bb73fb6f3c3db2b4a

rustdoc: do not use plain summary for trait impls Fixes #38386. Fixes #48332. Fixes #49430. Fixes #62741. Fixes #73474.

view details

Andy Russell

commit sha 98232ece14bdd68aeac3d761039d9a7c88c30b3f

fix broken trait method links

view details

Joshua Nelson

commit sha d7150154fa5c35c0b570570f156ba3a5cc6dfb1d

Improve tests Now this actually tests the links are generated correctly

view details

CDirkx

commit sha 5fac991bf6bc4b07df9b4b4eb3fcb0c5487973c4

Add unstable `const_ordering` feature, and some tests.

view details

Aaron Hill

commit sha a97dcfa3755990062fd7905370cddd3843541db0

Run cfg-stripping on generic parameters before invoking derive macros Fixes #75930 This changes the tokens seen by a proc-macro. However, ising a `#[cfg]` attribute on a generic paramter is unusual, and combining it with a proc-macro derive is probably even more unusual. I don't expect this to cause any breakage.

view details

Aleksey Kladov

commit sha ccffea5b6b3372cefd4e15bc738a2669bc6f69a0

Move lexer unit tests to rustc_lexer StringReader is an intornal abstraction which at the moment changes a lot, so these unit tests cause quite a bit of friction. Moving them to rustc_lexer and more ingerated-testing style should make them much less annoying, hopefully without decreasing their usefulness much. Note that coloncolon tests are removed (it's unclear what those are testing). \r\n tests are removed as well, as we normalize line endings even before lexing.

view details

Aleksey Kladov

commit sha 518cac91902d34567ac8bfea3022f426a7de53f6

Remove unused function

view details

David Wood

commit sha 6ff471b1cf85dea0e8f83b5212042905aac35143

ty: remove obsolete printer This commit removes the obsolete printer and replaces all uses of it with `FmtPrinter`. Of the replaced uses, all but one use was in `debug!` logging, two cases were notable: - `MonoItem::to_string` is used in `-Z print-mono-items` and therefore affects the output of all codegen-units tests. - `DefPathBasedNames` was used in `librustc_codegen_llvm/type_of.rs` with `LLVMStructCreateNamed` and that'll now get different values, but this should result in no functional change. Signed-off-by: David Wood <david@davidtw.co>

view details

Caleb Cartwright

commit sha 883b1e7592dec6e309db018a2a87253d91d50b4a

parser: restore some fn visibility for rustfmt

view details

push time in 11 days

PR opened rust-lang/rust

Update LLVM

This (partially?) addresses rust-lang/rust#74632

r? @cuviper

+1 -1

0 comment

1 changed file

pr created time in 11 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+[package]+name = "async-queue"

ipc-queue

mzohreva

comment created time in 12 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, Return, Usercall};+use lazy_static::lazy_static;+use std::collections::HashSet;+use std::mem;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Mutex;++impl<T: WithAtomicId> Fifo<T> {+    /// panics if len is not a power of two.+    pub fn new(len: usize) -> Self {+        assert!(len.is_power_of_two(), "Fifo len should be a power of two");+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }++    pub fn descriptor(&mut self) -> FifoDescriptor<T> {+        FifoDescriptor {+            data: self.data.as_mut().as_mut_ptr(),+            len: self.data.len(),+            offsets: self.offsets.as_ref() as _,+        }+    }++    pub fn sender<S: Synchronizer>(&mut self, synchronizer: S) -> Sender<T, S> {+        Sender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn receiver<S: Synchronizer>(&mut self, synchronizer: S) -> Receiver<T, S> {+        Receiver::new(self.descriptor(), synchronizer)+    }++    pub fn async_sender<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncSender<T, S> {+        AsyncSender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn async_receiver<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncReceiver<T, S> {+        AsyncReceiver::new(self.descriptor(), synchronizer)+    }+}++pub(crate) fn try_send_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>, val: &T) -> 
Result</*wake up reader:*/ bool, TrySendError> {+    let (new, was_empty) = loop {+        // 1. Load the current offsets.+        let current = unsafe {+            let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+            Offsets::new(offsets, descriptor.len as u32)+        };+        let was_empty = current.is_empty();++        // 2. If the queue is full, wait, then go to step 1.+        if current.is_full() {+            return Err(TrySendError::QueueFull);+        }++        // 3. Add 1 to the write offset and do an atomic compare-and-swap (CAS)+        //    with the current offsets. If the CAS was not succesful, go to step 1.+        let new = current.increment_write_offset();+        let current = current.as_usize();+        let prev = unsafe {+            (*descriptor.offsets).compare_and_swap(current, new.as_usize(), Ordering::SeqCst)+        };+        if prev == current {+            break (new, was_empty);+        }+    };++    // 4. Write the data, then the `id`.+    let slot = unsafe { &mut *descriptor.data.add(new.write_offset()) };+    slot.copy_except_id(&val);+    slot.set_id(val.get_id());++    // 5. If the queue was empty in step 1, signal the reader to wake up.+    Ok(was_empty)+}++pub(crate) fn try_recv_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>) -> Result<(T, /*wake up writer:*/ bool), TryRecvError> {+    // 1. Load the current offsets.+    let current = unsafe {+        let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+        Offsets::new(offsets, descriptor.len as u32)+    };+    let was_full = current.is_full();++    // 2. If the queue is empty, wait, then go to step 1.+    if current.is_empty() {+        return Err(TryRecvError::QueueEmpty);+    }++    // 3. Add 1 to the read offset.+    let new = current.increment_read_offset();++    let slot = loop {+        // 4. 
Read the `id` at the new read offset.+        let slot = unsafe { &mut *descriptor.data.add(new.read_offset()) };+        let id = slot.get_id();++        // 5. If `id` is `0`, go to step 4 (spin). Spinning is OK because data is+        //    expected to be written imminently.+        if id != 0 {+            break slot;+        }+    };++    // 6. Read the data, then store `0` in the `id`.+    let mut val = T::empty();+    val.copy_except_id(slot);+    val.set_id(slot.get_id());+    slot.set_id(0);++    // 7. Store the new read offset.+    let after = unsafe {+        fetch_adjust(+            &*descriptor.offsets,+            new.read as isize - current.read as isize,+            Ordering::SeqCst,+        )+    };++    // 8. If the queue was full in step 1, signal the writer to wake up.+    //    ... or became full during read+    let became_full = Offsets::new(after, descriptor.len as u32).is_full();+    Ok((val, was_full || became_full))+}++lazy_static! {+    pub(crate) static ref RECEIVER_TRACKER: ReceiverTracker = ReceiverTracker::new();+}++pub(crate) struct ReceiverTracker(Mutex<HashSet<usize>>);++impl ReceiverTracker {+    fn new() -> Self {+        Self(Mutex::new(HashSet::new()))+    }++    pub(crate) fn new_receiver(&self, data_ptr: usize) {+        let already_exists = {+            let mut receivers = self.0.lock().unwrap();+            !receivers.insert(data_ptr)+        };+        if already_exists {+            panic!("Multiple receivers for the same Fifo is not allowed.");+        }+    }++    pub(crate) fn drop_receiver(&self, data_ptr: usize) {+        let mut receivers = self.0.lock().unwrap();+        receivers.remove(&data_ptr);+    }+}++// Note: we cannot have an AtomicU64 id in Usercall/Return types since they+// need to be Copy due to requirements of `UserSafeSized` (see definition of+// this trait in rust/src/libstd/sys/sgx/abi/usercalls/alloc.rs). 
Therefore+// all the transmutes in the implementation below.+impl WithAtomicId for Usercall {+    fn empty() -> Self {+        Self {+            id: 0,+            args: (0, 0, 0, 0, 0),+        }+    }+    fn get_id(&self) -> u64 {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.load(Ordering::SeqCst)+    }+    fn set_id(&mut self, new_id: u64) {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.store(new_id, Ordering::SeqCst);+    }+    fn copy_except_id(&mut self, from: &Self) {+        let Self { id: _, args } = from;+        self.args = *args;+    }+}++impl WithAtomicId for Return {+    fn empty() -> Self {+        Self {+            id: 0,+            value: (0, 0),+        }+    }+    fn get_id(&self) -> u64 {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.load(Ordering::SeqCst)+    }+    fn set_id(&mut self, new_id: u64) {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.store(new_id, Ordering::SeqCst);+    }+    fn copy_except_id(&mut self, from: &Self) {+        let Self { id: _, value } = from;+        self.value = *value;+    }+}++pub(crate) fn fetch_adjust(x: &AtomicUsize, delta: isize, ord: Ordering) -> usize {+    match delta > 0 {+        true => x.fetch_add(delta as usize, ord),+        false => x.fetch_sub(-delta as usize, ord),+    }+}++#[derive(Clone, Copy)]+pub(crate) struct Offsets {

Please add a compile-time assertion that usize is 8 bytes. The algorithm can also work on a 32-bit platform with u16/u16, but this code doesn't.

mzohreva

comment created time in 12 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use fortanix_sgx_abi::FifoDescriptor;+use std::future::Future;+use std::pin::Pin;+use std::sync::atomic::AtomicUsize;++mod fifo;+mod interface_sync;+mod interface_async;+#[cfg(test)]+mod test_support;++/// A FIFO queue implemented according to [fortanix_sgx_abi specifications].+///+/// **NOTE:** Sender and reciever types use FifoDescriptor internally which+/// does not hold a reference to the Fifo instance, therefore users of these+/// types must ensure that the Fifo instance lives at least as long as all+/// senders and receivers for that queue.+///+/// **NOTE:** sync and async sender/receiver types should not be used together.+/// i.e. either use sync senders/receivers or the async ones, but don't mix+/// sync and async. The interfaces are designed for use in SGX enclaves (sync)+/// and enclave runner (async).

This requirement should be documented on Sender::new etc. and those functions should be unsafe.

mzohreva

comment created time in 12 days

PullRequestReviewEvent

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, Return, Usercall};+use lazy_static::lazy_static;+use std::collections::HashSet;+use std::mem;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Mutex;++impl<T: WithAtomicId> Fifo<T> {+    /// panics if len is not a power of two.+    pub fn new(len: usize) -> Self {+        assert!(len.is_power_of_two(), "Fifo len should be a power of two");+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }++    pub fn descriptor(&mut self) -> FifoDescriptor<T> {+        FifoDescriptor {+            data: self.data.as_mut().as_mut_ptr(),+            len: self.data.len(),+            offsets: self.offsets.as_ref() as _,+        }+    }++    pub fn sender<S: Synchronizer>(&mut self, synchronizer: S) -> Sender<T, S> {+        Sender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn receiver<S: Synchronizer>(&mut self, synchronizer: S) -> Receiver<T, S> {+        Receiver::new(self.descriptor(), synchronizer)+    }++    pub fn async_sender<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncSender<T, S> {+        AsyncSender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn async_receiver<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncReceiver<T, S> {+        AsyncReceiver::new(self.descriptor(), synchronizer)+    }+}++pub(crate) fn try_send_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>, val: &T) -> 
Result</*wake up reader:*/ bool, TrySendError> {+    let (new, was_empty) = loop {+        // 1. Load the current offsets.+        let current = unsafe {+            let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+            Offsets::new(offsets, descriptor.len as u32)+        };+        let was_empty = current.is_empty();++        // 2. If the queue is full, wait, then go to step 1.+        if current.is_full() {+            return Err(TrySendError::QueueFull);+        }++        // 3. Add 1 to the write offset and do an atomic compare-and-swap (CAS)+        //    with the current offsets. If the CAS was not succesful, go to step 1.+        let new = current.increment_write_offset();+        let current = current.as_usize();+        let prev = unsafe {+            (*descriptor.offsets).compare_and_swap(current, new.as_usize(), Ordering::SeqCst)+        };+        if prev == current {+            break (new, was_empty);+        }+    };++    // 4. Write the data, then the `id`.+    let slot = unsafe { &mut *descriptor.data.add(new.write_offset()) };+    slot.copy_except_id(&val);+    slot.set_id(val.get_id());++    // 5. If the queue was empty in step 1, signal the reader to wake up.+    Ok(was_empty)+}++pub(crate) fn try_recv_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>) -> Result<(T, /*wake up writer:*/ bool), TryRecvError> {+    // 1. Load the current offsets.+    let current = unsafe {+        let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+        Offsets::new(offsets, descriptor.len as u32)+    };+    let was_full = current.is_full();++    // 2. If the queue is empty, wait, then go to step 1.+    if current.is_empty() {+        return Err(TryRecvError::QueueEmpty);+    }++    // 3. Add 1 to the read offset.+    let new = current.increment_read_offset();++    let slot = loop {+        // 4. 
Read the `id` at the new read offset.+        let slot = unsafe { &mut *descriptor.data.add(new.read_offset()) };+        let id = slot.get_id();++        // 5. If `id` is `0`, go to step 4 (spin). Spinning is OK because data is+        //    expected to be written imminently.+        if id != 0 {+            break slot;+        }+    };++    // 6. Read the data, then store `0` in the `id`.+    let mut val = T::empty();+    val.copy_except_id(slot);+    val.set_id(slot.get_id());+    slot.set_id(0);++    // 7. Store the new read offset.+    let after = unsafe {+        fetch_adjust(+            &*descriptor.offsets,+            new.read as isize - current.read as isize,+            Ordering::SeqCst,+        )+    };++    // 8. If the queue was full in step 1, signal the writer to wake up.+    //    ... or became full during read+    let became_full = Offsets::new(after, descriptor.len as u32).is_full();+    Ok((val, was_full || became_full))+}++lazy_static! {+    pub(crate) static ref RECEIVER_TRACKER: ReceiverTracker = ReceiverTracker::new();+}++pub(crate) struct ReceiverTracker(Mutex<HashSet<usize>>);++impl ReceiverTracker {+    fn new() -> Self {+        Self(Mutex::new(HashSet::new()))+    }++    pub(crate) fn new_receiver(&self, data_ptr: usize) {+        let already_exists = {+            let mut receivers = self.0.lock().unwrap();+            !receivers.insert(data_ptr)+        };+        if already_exists {+            panic!("Multiple receivers for the same Fifo is not allowed.");+        }+    }++    pub(crate) fn drop_receiver(&self, data_ptr: usize) {+        let mut receivers = self.0.lock().unwrap();+        receivers.remove(&data_ptr);+    }+}++// Note: we cannot have an AtomicU64 id in Usercall/Return types since they+// need to be Copy due to requirements of `UserSafeSized` (see definition of

I don't think so? UserSafeSized just needs FifoDescriptor<T> to be Copy, but that doesn't require T: Copy. So it should be possible to just convert the id fields to AtomicU64? Even if not, I prefer type casting over transmutes (transmuting a reference is pretty pointless).

This is related to something else I noticed: it would be good if we could enforce some kind of Copy bound on WithAtomicId. We could perhaps rearchitect the whole thing as follows:

#[repr(C)]
struct WithId<T: Copy> {
    id: AtomicU64,
    data: T,
}

I have a faint memory that we used to have that, but I don't remember the details.

mzohreva

comment created time in 12 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, Return, Usercall};+use lazy_static::lazy_static;+use std::collections::HashSet;+use std::mem;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Mutex;++impl<T: WithAtomicId> Fifo<T> {+    /// panics if len is not a power of two.+    pub fn new(len: usize) -> Self {+        assert!(len.is_power_of_two(), "Fifo len should be a power of two");+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }++    pub fn descriptor(&mut self) -> FifoDescriptor<T> {+        FifoDescriptor {+            data: self.data.as_mut().as_mut_ptr(),+            len: self.data.len(),+            offsets: self.offsets.as_ref() as _,+        }+    }++    pub fn sender<S: Synchronizer>(&mut self, synchronizer: S) -> Sender<T, S> {+        Sender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn receiver<S: Synchronizer>(&mut self, synchronizer: S) -> Receiver<T, S> {+        Receiver::new(self.descriptor(), synchronizer)+    }++    pub fn async_sender<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncSender<T, S> {+        AsyncSender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn async_receiver<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncReceiver<T, S> {+        AsyncReceiver::new(self.descriptor(), synchronizer)+    }+}++pub(crate) fn try_send_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>, val: &T) -> 
Result</*wake up reader:*/ bool, TrySendError> {+    let (new, was_empty) = loop {+        // 1. Load the current offsets.+        let current = unsafe {+            let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+            Offsets::new(offsets, descriptor.len as u32)+        };+        let was_empty = current.is_empty();++        // 2. If the queue is full, wait, then go to step 1.+        if current.is_full() {+            return Err(TrySendError::QueueFull);+        }++        // 3. Add 1 to the write offset and do an atomic compare-and-swap (CAS)+        //    with the current offsets. If the CAS was not succesful, go to step 1.+        let new = current.increment_write_offset();+        let current = current.as_usize();+        let prev = unsafe {+            (*descriptor.offsets).compare_and_swap(current, new.as_usize(), Ordering::SeqCst)+        };+        if prev == current {+            break (new, was_empty);+        }+    };++    // 4. Write the data, then the `id`.+    let slot = unsafe { &mut *descriptor.data.add(new.write_offset()) };

I much prefer pointer::offset over pointer::add and I think we never use the sign bit so you can just use isize.

mzohreva

comment created time in 12 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, Return, Usercall};+use lazy_static::lazy_static;+use std::collections::HashSet;+use std::mem;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Mutex;++impl<T: WithAtomicId> Fifo<T> {+    /// panics if len is not a power of two.+    pub fn new(len: usize) -> Self {+        assert!(len.is_power_of_two(), "Fifo len should be a power of two");+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }++    pub fn descriptor(&mut self) -> FifoDescriptor<T> {+        FifoDescriptor {+            data: self.data.as_mut().as_mut_ptr(),+            len: self.data.len(),+            offsets: self.offsets.as_ref() as _,+        }+    }++    pub fn sender<S: Synchronizer>(&mut self, synchronizer: S) -> Sender<T, S> {+        Sender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn receiver<S: Synchronizer>(&mut self, synchronizer: S) -> Receiver<T, S> {+        Receiver::new(self.descriptor(), synchronizer)+    }++    pub fn async_sender<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncSender<T, S> {+        AsyncSender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn async_receiver<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncReceiver<T, S> {+        AsyncReceiver::new(self.descriptor(), synchronizer)+    }+}++pub(crate) fn try_send_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>, val: &T) -> 
Result</*wake up reader:*/ bool, TrySendError> {+    let (new, was_empty) = loop {+        // 1. Load the current offsets.+        let current = unsafe {+            let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+            Offsets::new(offsets, descriptor.len as u32)+        };+        let was_empty = current.is_empty();++        // 2. If the queue is full, wait, then go to step 1.+        if current.is_full() {+            return Err(TrySendError::QueueFull);+        }++        // 3. Add 1 to the write offset and do an atomic compare-and-swap (CAS)+        //    with the current offsets. If the CAS was not succesful, go to step 1.+        let new = current.increment_write_offset();+        let current = current.as_usize();+        let prev = unsafe {+            (*descriptor.offsets).compare_and_swap(current, new.as_usize(), Ordering::SeqCst)+        };+        if prev == current {+            break (new, was_empty);+        }+    };++    // 4. Write the data, then the `id`.+    let slot = unsafe { &mut *descriptor.data.add(new.write_offset()) };+    slot.copy_except_id(&val);+    slot.set_id(val.get_id());++    // 5. If the queue was empty in step 1, signal the reader to wake up.+    Ok(was_empty)+}++pub(crate) fn try_recv_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>) -> Result<(T, /*wake up writer:*/ bool), TryRecvError> {+    // 1. Load the current offsets.+    let current = unsafe {+        let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+        Offsets::new(offsets, descriptor.len as u32)+    };+    let was_full = current.is_full();++    // 2. If the queue is empty, wait, then go to step 1.+    if current.is_empty() {+        return Err(TryRecvError::QueueEmpty);+    }++    // 3. Add 1 to the read offset.+    let new = current.increment_read_offset();++    let slot = loop {+        // 4. 
Read the `id` at the new read offset.+        let slot = unsafe { &mut *descriptor.data.add(new.read_offset()) };+        let id = slot.get_id();++        // 5. If `id` is `0`, go to step 4 (spin). Spinning is OK because data is+        //    expected to be written imminently.+        if id != 0 {+            break slot;+        }+    };++    // 6. Read the data, then store `0` in the `id`.+    let mut val = T::empty();+    val.copy_except_id(slot);+    val.set_id(slot.get_id());+    slot.set_id(0);++    // 7. Store the new read offset.+    let after = unsafe {+        fetch_adjust(+            &*descriptor.offsets,+            new.read as isize - current.read as isize,+            Ordering::SeqCst,+        )+    };++    // 8. If the queue was full in step 1, signal the writer to wake up.+    //    ... or became full during read+    let became_full = Offsets::new(after, descriptor.len as u32).is_full();+    Ok((val, was_full || became_full))+}++lazy_static! {+    pub(crate) static ref RECEIVER_TRACKER: ReceiverTracker = ReceiverTracker::new();+}++pub(crate) struct ReceiverTracker(Mutex<HashSet<usize>>);++impl ReceiverTracker {+    fn new() -> Self {+        Self(Mutex::new(HashSet::new()))+    }++    pub(crate) fn new_receiver(&self, data_ptr: usize) {+        let already_exists = {+            let mut receivers = self.0.lock().unwrap();+            !receivers.insert(data_ptr)+        };+        if already_exists {+            panic!("Multiple receivers for the same Fifo is not allowed.");+        }+    }++    pub(crate) fn drop_receiver(&self, data_ptr: usize) {+        let mut receivers = self.0.lock().unwrap();+        receivers.remove(&data_ptr);+    }+}++// Note: we cannot have an AtomicU64 id in Usercall/Return types since they+// need to be Copy due to requirements of `UserSafeSized` (see definition of+// this trait in rust/src/libstd/sys/sgx/abi/usercalls/alloc.rs). 
Therefore+// all the transmutes in the implementation below.+impl WithAtomicId for Usercall {+    fn empty() -> Self {+        Self {+            id: 0,+            args: (0, 0, 0, 0, 0),+        }+    }+    fn get_id(&self) -> u64 {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.load(Ordering::SeqCst)+    }+    fn set_id(&mut self, new_id: u64) {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.store(new_id, Ordering::SeqCst);+    }+    fn copy_except_id(&mut self, from: &Self) {+        let Self { id: _, args } = from;+        self.args = *args;+    }+}++impl WithAtomicId for Return {+    fn empty() -> Self {+        Self {+            id: 0,+            value: (0, 0),+        }+    }+    fn get_id(&self) -> u64 {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.load(Ordering::SeqCst)+    }+    fn set_id(&mut self, new_id: u64) {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.store(new_id, Ordering::SeqCst);+    }+    fn copy_except_id(&mut self, from: &Self) {+        let Self { id: _, value } = from;+        self.value = *value;+    }+}++pub(crate) fn fetch_adjust(x: &AtomicUsize, delta: isize, ord: Ordering) -> usize {+    match delta > 0 {+        true => x.fetch_add(delta as usize, ord),+        false => x.fetch_sub(-delta as usize, ord),+    }+}++#[derive(Clone, Copy)]+pub(crate) struct Offsets {+    write: u32,+    read: u32,+    len: u32,+}++impl Offsets {+    pub(crate) fn new(offsets: usize, len: u32) -> Self {+        Self {+            write: (offsets >> 32) as u32,+            read: (offsets & ((1 << 32) - 1)) as u32,+            len,+        }+    }++    pub(crate) fn as_usize(&self) -> usize {+        ((self.write as usize) << 32) | (self.read as usize)+    }++    pub(crate) fn is_empty(&self) -> bool {+        self.read_offset() == self.write_offset() && self.read == self.write+    }++    pub(crate) fn is_full(&self) -> bool {+       
 self.read_offset() == self.write_offset() && self.read != self.write

I suppose this is fine. Alternatively, `(self.read ^ self.write) == self.len`: since the offsets are tracked modulo `2 * len` and `len` is a power of two, the queue is full exactly when `read` and `write` differ only in the `len` bit. The optimizer might catch this though.

mzohreva

comment created time in 12 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, Return, Usercall};+use lazy_static::lazy_static;+use std::collections::HashSet;+use std::mem;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Mutex;++impl<T: WithAtomicId> Fifo<T> {+    /// panics if len is not a power of two.+    pub fn new(len: usize) -> Self {+        assert!(len.is_power_of_two(), "Fifo len should be a power of two");+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }++    pub fn descriptor(&mut self) -> FifoDescriptor<T> {+        FifoDescriptor {+            data: self.data.as_mut().as_mut_ptr(),+            len: self.data.len(),+            offsets: self.offsets.as_ref() as _,+        }+    }++    pub fn sender<S: Synchronizer>(&mut self, synchronizer: S) -> Sender<T, S> {+        Sender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn receiver<S: Synchronizer>(&mut self, synchronizer: S) -> Receiver<T, S> {+        Receiver::new(self.descriptor(), synchronizer)+    }++    pub fn async_sender<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncSender<T, S> {+        AsyncSender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn async_receiver<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncReceiver<T, S> {+        AsyncReceiver::new(self.descriptor(), synchronizer)+    }+}++pub(crate) fn try_send_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>, val: &T) -> 
Result</*wake up reader:*/ bool, TrySendError> {+    let (new, was_empty) = loop {+        // 1. Load the current offsets.+        let current = unsafe {+            let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+            Offsets::new(offsets, descriptor.len as u32)+        };+        let was_empty = current.is_empty();++        // 2. If the queue is full, wait, then go to step 1.+        if current.is_full() {+            return Err(TrySendError::QueueFull);+        }++        // 3. Add 1 to the write offset and do an atomic compare-and-swap (CAS)+        //    with the current offsets. If the CAS was not succesful, go to step 1.+        let new = current.increment_write_offset();+        let current = current.as_usize();+        let prev = unsafe {+            (*descriptor.offsets).compare_and_swap(current, new.as_usize(), Ordering::SeqCst)+        };+        if prev == current {+            break (new, was_empty);+        }+    };++    // 4. Write the data, then the `id`.+    let slot = unsafe { &mut *descriptor.data.add(new.write_offset()) };+    slot.copy_except_id(&val);+    slot.set_id(val.get_id());++    // 5. If the queue was empty in step 1, signal the reader to wake up.+    Ok(was_empty)+}++pub(crate) fn try_recv_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>) -> Result<(T, /*wake up writer:*/ bool), TryRecvError> {+    // 1. Load the current offsets.+    let current = unsafe {+        let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+        Offsets::new(offsets, descriptor.len as u32)+    };+    let was_full = current.is_full();++    // 2. If the queue is empty, wait, then go to step 1.+    if current.is_empty() {+        return Err(TryRecvError::QueueEmpty);+    }++    // 3. Add 1 to the read offset.+    let new = current.increment_read_offset();++    let slot = loop {+        // 4. 
Read the `id` at the new read offset.+        let slot = unsafe { &mut *descriptor.data.add(new.read_offset()) };+        let id = slot.get_id();++        // 5. If `id` is `0`, go to step 4 (spin). Spinning is OK because data is+        //    expected to be written imminently.+        if id != 0 {+            break slot;+        }+    };++    // 6. Read the data, then store `0` in the `id`.+    let mut val = T::empty();+    val.copy_except_id(slot);+    val.set_id(slot.get_id());+    slot.set_id(0);++    // 7. Store the new read offset.+    let after = unsafe {+        fetch_adjust(+            &*descriptor.offsets,+            new.read as isize - current.read as isize,+            Ordering::SeqCst,+        )+    };++    // 8. If the queue was full in step 1, signal the writer to wake up.+    //    ... or became full during read+    let became_full = Offsets::new(after, descriptor.len as u32).is_full();+    Ok((val, was_full || became_full))+}++lazy_static! {+    pub(crate) static ref RECEIVER_TRACKER: ReceiverTracker = ReceiverTracker::new();+}++pub(crate) struct ReceiverTracker(Mutex<HashSet<usize>>);++impl ReceiverTracker {+    fn new() -> Self {+        Self(Mutex::new(HashSet::new()))+    }++    pub(crate) fn new_receiver(&self, data_ptr: usize) {+        let already_exists = {+            let mut receivers = self.0.lock().unwrap();+            !receivers.insert(data_ptr)+        };+        if already_exists {+            panic!("Multiple receivers for the same Fifo is not allowed.");+        }+    }++    pub(crate) fn drop_receiver(&self, data_ptr: usize) {+        let mut receivers = self.0.lock().unwrap();+        receivers.remove(&data_ptr);+    }+}++// Note: we cannot have an AtomicU64 id in Usercall/Return types since they+// need to be Copy due to requirements of `UserSafeSized` (see definition of+// this trait in rust/src/libstd/sys/sgx/abi/usercalls/alloc.rs). 
Therefore+// all the transmutes in the implementation below.+impl WithAtomicId for Usercall {+    fn empty() -> Self {+        Self {+            id: 0,+            args: (0, 0, 0, 0, 0),+        }+    }+    fn get_id(&self) -> u64 {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.load(Ordering::SeqCst)+    }+    fn set_id(&mut self, new_id: u64) {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.store(new_id, Ordering::SeqCst);+    }+    fn copy_except_id(&mut self, from: &Self) {+        let Self { id: _, args } = from;+        self.args = *args;+    }+}++impl WithAtomicId for Return {+    fn empty() -> Self {+        Self {+            id: 0,+            value: (0, 0),+        }+    }+    fn get_id(&self) -> u64 {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.load(Ordering::SeqCst)+    }+    fn set_id(&mut self, new_id: u64) {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.store(new_id, Ordering::SeqCst);+    }+    fn copy_except_id(&mut self, from: &Self) {+        let Self { id: _, value } = from;+        self.value = *value;+    }+}++pub(crate) fn fetch_adjust(x: &AtomicUsize, delta: isize, ord: Ordering) -> usize {+    match delta > 0 {+        true => x.fetch_add(delta as usize, ord),+        false => x.fetch_sub(-delta as usize, ord),+    }+}++#[derive(Clone, Copy)]+pub(crate) struct Offsets {+    write: u32,+    read: u32,+    len: u32,+}++impl Offsets {+    pub(crate) fn new(offsets: usize, len: u32) -> Self {+        Self {+            write: (offsets >> 32) as u32,+            read: (offsets & ((1 << 32) - 1)) as u32,+            len,+        }+    }++    pub(crate) fn as_usize(&self) -> usize {+        ((self.write as usize) << 32) | (self.read as usize)+    }++    pub(crate) fn is_empty(&self) -> bool {+        self.read_offset() == self.write_offset() && self.read == self.write

This is just `self.read == self.write` — equality of the raw counters already implies equality of the wrapped offsets, so the first comparison is redundant.

mzohreva

comment created time in 12 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, Return, Usercall};+use lazy_static::lazy_static;+use std::collections::HashSet;+use std::mem;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Mutex;++impl<T: WithAtomicId> Fifo<T> {+    /// panics if len is not a power of two.+    pub fn new(len: usize) -> Self {+        assert!(len.is_power_of_two(), "Fifo len should be a power of two");

You should assert the same somewhere in the receiver/sender path.

mzohreva

comment created time in 12 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, Return, Usercall};+use lazy_static::lazy_static;+use std::collections::HashSet;+use std::mem;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Mutex;++impl<T: WithAtomicId> Fifo<T> {+    /// panics if len is not a power of two.+    pub fn new(len: usize) -> Self {+        assert!(len.is_power_of_two(), "Fifo len should be a power of two");+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }++    pub fn descriptor(&mut self) -> FifoDescriptor<T> {+        FifoDescriptor {+            data: self.data.as_mut().as_mut_ptr(),+            len: self.data.len(),+            offsets: self.offsets.as_ref() as _,+        }+    }++    pub fn sender<S: Synchronizer>(&mut self, synchronizer: S) -> Sender<T, S> {+        Sender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn receiver<S: Synchronizer>(&mut self, synchronizer: S) -> Receiver<T, S> {+        Receiver::new(self.descriptor(), synchronizer)+    }++    pub fn async_sender<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncSender<T, S> {+        AsyncSender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn async_receiver<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncReceiver<T, S> {+        AsyncReceiver::new(self.descriptor(), synchronizer)+    }+}++pub(crate) fn try_send_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>, val: &T) -> 
Result</*wake up reader:*/ bool, TrySendError> {+    let (new, was_empty) = loop {+        // 1. Load the current offsets.+        let current = unsafe {+            let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+            Offsets::new(offsets, descriptor.len as u32)+        };+        let was_empty = current.is_empty();++        // 2. If the queue is full, wait, then go to step 1.+        if current.is_full() {+            return Err(TrySendError::QueueFull);+        }++        // 3. Add 1 to the write offset and do an atomic compare-and-swap (CAS)+        //    with the current offsets. If the CAS was not succesful, go to step 1.+        let new = current.increment_write_offset();+        let current = current.as_usize();+        let prev = unsafe {+            (*descriptor.offsets).compare_and_swap(current, new.as_usize(), Ordering::SeqCst)+        };+        if prev == current {+            break (new, was_empty);+        }+    };++    // 4. Write the data, then the `id`.+    let slot = unsafe { &mut *descriptor.data.add(new.write_offset()) };+    slot.copy_except_id(&val);+    slot.set_id(val.get_id());++    // 5. If the queue was empty in step 1, signal the reader to wake up.+    Ok(was_empty)+}++pub(crate) fn try_recv_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>) -> Result<(T, /*wake up writer:*/ bool), TryRecvError> {+    // 1. Load the current offsets.+    let current = unsafe {+        let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+        Offsets::new(offsets, descriptor.len as u32)+    };+    let was_full = current.is_full();++    // 2. If the queue is empty, wait, then go to step 1.+    if current.is_empty() {+        return Err(TryRecvError::QueueEmpty);+    }++    // 3. Add 1 to the read offset.+    let new = current.increment_read_offset();++    let slot = loop {+        // 4. 
Read the `id` at the new read offset.+        let slot = unsafe { &mut *descriptor.data.add(new.read_offset()) };+        let id = slot.get_id();++        // 5. If `id` is `0`, go to step 4 (spin). Spinning is OK because data is+        //    expected to be written imminently.+        if id != 0 {+            break slot;+        }+    };++    // 6. Read the data, then store `0` in the `id`.+    let mut val = T::empty();+    val.copy_except_id(slot);+    val.set_id(slot.get_id());+    slot.set_id(0);++    // 7. Store the new read offset.+    let after = unsafe {+        fetch_adjust(+            &*descriptor.offsets,+            new.read as isize - current.read as isize,+            Ordering::SeqCst,+        )+    };++    // 8. If the queue was full in step 1, signal the writer to wake up.+    //    ... or became full during read+    let became_full = Offsets::new(after, descriptor.len as u32).is_full();+    Ok((val, was_full || became_full))+}++lazy_static! {+    pub(crate) static ref RECEIVER_TRACKER: ReceiverTracker = ReceiverTracker::new();+}++pub(crate) struct ReceiverTracker(Mutex<HashSet<usize>>);++impl ReceiverTracker {+    fn new() -> Self {+        Self(Mutex::new(HashSet::new()))+    }++    pub(crate) fn new_receiver(&self, data_ptr: usize) {+        let already_exists = {+            let mut receivers = self.0.lock().unwrap();+            !receivers.insert(data_ptr)+        };+        if already_exists {+            panic!("Multiple receivers for the same Fifo is not allowed.");+        }+    }++    pub(crate) fn drop_receiver(&self, data_ptr: usize) {+        let mut receivers = self.0.lock().unwrap();+        receivers.remove(&data_ptr);+    }+}++// Note: we cannot have an AtomicU64 id in Usercall/Return types since they+// need to be Copy due to requirements of `UserSafeSized` (see definition of+// this trait in rust/src/libstd/sys/sgx/abi/usercalls/alloc.rs). 
Therefore+// all the transmutes in the implementation below.+impl WithAtomicId for Usercall {+    fn empty() -> Self {+        Self {+            id: 0,+            args: (0, 0, 0, 0, 0),+        }+    }+    fn get_id(&self) -> u64 {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.load(Ordering::SeqCst)+    }+    fn set_id(&mut self, new_id: u64) {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.store(new_id, Ordering::SeqCst);+    }+    fn copy_except_id(&mut self, from: &Self) {+        let Self { id: _, args } = from;+        self.args = *args;+    }+}++impl WithAtomicId for Return {+    fn empty() -> Self {+        Self {+            id: 0,+            value: (0, 0),+        }+    }+    fn get_id(&self) -> u64 {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.load(Ordering::SeqCst)+    }+    fn set_id(&mut self, new_id: u64) {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.store(new_id, Ordering::SeqCst);+    }+    fn copy_except_id(&mut self, from: &Self) {+        let Self { id: _, value } = from;+        self.value = *value;+    }+}++pub(crate) fn fetch_adjust(x: &AtomicUsize, delta: isize, ord: Ordering) -> usize {+    match delta > 0 {+        true => x.fetch_add(delta as usize, ord),+        false => x.fetch_sub(-delta as usize, ord),+    }+}++#[derive(Clone, Copy)]+pub(crate) struct Offsets {+    write: u32,+    read: u32,+    len: u32,+}++impl Offsets {+    pub(crate) fn new(offsets: usize, len: u32) -> Self {+        Self {+            write: (offsets >> 32) as u32,+            read: (offsets & ((1 << 32) - 1)) as u32,+            len,+        }+    }++    pub(crate) fn as_usize(&self) -> usize {+        ((self.write as usize) << 32) | (self.read as usize)+    }++    pub(crate) fn is_empty(&self) -> bool {+        self.read_offset() == self.write_offset() && self.read == self.write+    }++    pub(crate) fn is_full(&self) -> bool {+       
 self.read_offset() == self.write_offset() && self.read != self.write+    }++    pub(crate) fn read_offset(&self) -> usize {+        (self.read % self.len) as _+    }++    pub(crate) fn write_offset(&self) -> usize {+        (self.write % self.len) as _+    }++    pub(crate) fn increment_read_offset(&self) -> Self {+        Self {+            read: (self.read + 1) % (self.len * 2),+            ..*self+        }+    }++    pub(crate) fn increment_write_offset(&self) -> Self {+        Self {+            write: (self.write + 1) % (self.len * 2),

Since `len` is stored in a variable (not a compile-time constant), I don't think the optimizer will be able to figure out it's a power of 2. Please rewrite all modulo operators in terms of bitwise operations, e.g. `x & (self.len - 1)` instead of `x % self.len`, and `x & (2 * self.len - 1)` instead of `x % (self.len * 2)`.

mzohreva

comment created time in 12 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use super::*;+use fortanix_sgx_abi::{FifoDescriptor, Return, Usercall};+use lazy_static::lazy_static;+use std::collections::HashSet;+use std::mem;+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};+use std::sync::Mutex;++impl<T: WithAtomicId> Fifo<T> {+    /// panics if len is not a power of two.+    pub fn new(len: usize) -> Self {+        assert!(len.is_power_of_two(), "Fifo len should be a power of two");+        let mut data = Vec::with_capacity(len);+        data.resize_with(len, T::empty);+        Self {+            data: data.into_boxed_slice(),+            offsets: Box::new(AtomicUsize::new(0)),+        }+    }++    pub fn descriptor(&mut self) -> FifoDescriptor<T> {+        FifoDescriptor {+            data: self.data.as_mut().as_mut_ptr(),+            len: self.data.len(),+            offsets: self.offsets.as_ref() as _,+        }+    }++    pub fn sender<S: Synchronizer>(&mut self, synchronizer: S) -> Sender<T, S> {+        Sender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn receiver<S: Synchronizer>(&mut self, synchronizer: S) -> Receiver<T, S> {+        Receiver::new(self.descriptor(), synchronizer)+    }++    pub fn async_sender<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncSender<T, S> {+        AsyncSender::new(self.descriptor(), synchronizer)+    }++    /// Panics if there is an existing (sync or async) receiver for the same queue.+    pub fn async_receiver<S: AsyncSynchronizer>(&mut self, synchronizer: S) -> AsyncReceiver<T, S> {+        AsyncReceiver::new(self.descriptor(), synchronizer)+    }+}++pub(crate) fn try_send_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>, val: &T) -> 
Result</*wake up reader:*/ bool, TrySendError> {+    let (new, was_empty) = loop {+        // 1. Load the current offsets.+        let current = unsafe {+            let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+            Offsets::new(offsets, descriptor.len as u32)+        };+        let was_empty = current.is_empty();++        // 2. If the queue is full, wait, then go to step 1.+        if current.is_full() {+            return Err(TrySendError::QueueFull);+        }++        // 3. Add 1 to the write offset and do an atomic compare-and-swap (CAS)+        //    with the current offsets. If the CAS was not succesful, go to step 1.+        let new = current.increment_write_offset();+        let current = current.as_usize();+        let prev = unsafe {+            (*descriptor.offsets).compare_and_swap(current, new.as_usize(), Ordering::SeqCst)+        };+        if prev == current {+            break (new, was_empty);+        }+    };++    // 4. Write the data, then the `id`.+    let slot = unsafe { &mut *descriptor.data.add(new.write_offset()) };+    slot.copy_except_id(&val);+    slot.set_id(val.get_id());++    // 5. If the queue was empty in step 1, signal the reader to wake up.+    Ok(was_empty)+}++pub(crate) fn try_recv_impl<T: WithAtomicId>(descriptor: &FifoDescriptor<T>) -> Result<(T, /*wake up writer:*/ bool), TryRecvError> {+    // 1. Load the current offsets.+    let current = unsafe {+        let offsets = (*descriptor.offsets).load(Ordering::SeqCst);+        Offsets::new(offsets, descriptor.len as u32)+    };+    let was_full = current.is_full();++    // 2. If the queue is empty, wait, then go to step 1.+    if current.is_empty() {+        return Err(TryRecvError::QueueEmpty);+    }++    // 3. Add 1 to the read offset.+    let new = current.increment_read_offset();++    let slot = loop {+        // 4. 
Read the `id` at the new read offset.+        let slot = unsafe { &mut *descriptor.data.add(new.read_offset()) };+        let id = slot.get_id();++        // 5. If `id` is `0`, go to step 4 (spin). Spinning is OK because data is+        //    expected to be written imminently.+        if id != 0 {+            break slot;+        }+    };++    // 6. Read the data, then store `0` in the `id`.+    let mut val = T::empty();+    val.copy_except_id(slot);+    val.set_id(slot.get_id());+    slot.set_id(0);++    // 7. Store the new read offset.+    let after = unsafe {+        fetch_adjust(+            &*descriptor.offsets,+            new.read as isize - current.read as isize,+            Ordering::SeqCst,+        )+    };++    // 8. If the queue was full in step 1, signal the writer to wake up.+    //    ... or became full during read+    let became_full = Offsets::new(after, descriptor.len as u32).is_full();+    Ok((val, was_full || became_full))+}++lazy_static! {+    pub(crate) static ref RECEIVER_TRACKER: ReceiverTracker = ReceiverTracker::new();+}++pub(crate) struct ReceiverTracker(Mutex<HashSet<usize>>);++impl ReceiverTracker {+    fn new() -> Self {+        Self(Mutex::new(HashSet::new()))+    }++    pub(crate) fn new_receiver(&self, data_ptr: usize) {+        let already_exists = {+            let mut receivers = self.0.lock().unwrap();+            !receivers.insert(data_ptr)+        };+        if already_exists {+            panic!("Multiple receivers for the same Fifo is not allowed.");+        }+    }++    pub(crate) fn drop_receiver(&self, data_ptr: usize) {+        let mut receivers = self.0.lock().unwrap();+        receivers.remove(&data_ptr);+    }+}++// Note: we cannot have an AtomicU64 id in Usercall/Return types since they+// need to be Copy due to requirements of `UserSafeSized` (see definition of+// this trait in rust/src/libstd/sys/sgx/abi/usercalls/alloc.rs). 
Therefore+// all the transmutes in the implementation below.+impl WithAtomicId for Usercall {+    fn empty() -> Self {+        Self {+            id: 0,+            args: (0, 0, 0, 0, 0),+        }+    }+    fn get_id(&self) -> u64 {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.load(Ordering::SeqCst)+    }+    fn set_id(&mut self, new_id: u64) {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.store(new_id, Ordering::SeqCst);+    }+    fn copy_except_id(&mut self, from: &Self) {+        let Self { id: _, args } = from;+        self.args = *args;+    }+}++impl WithAtomicId for Return {+    fn empty() -> Self {+        Self {+            id: 0,+            value: (0, 0),+        }+    }+    fn get_id(&self) -> u64 {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.load(Ordering::SeqCst)+    }+    fn set_id(&mut self, new_id: u64) {+        let id: &AtomicU64 = unsafe { mem::transmute(&self.id) };+        id.store(new_id, Ordering::SeqCst);+    }+    fn copy_except_id(&mut self, from: &Self) {+        let Self { id: _, value } = from;+        self.value = *value;+    }+}++pub(crate) fn fetch_adjust(x: &AtomicUsize, delta: isize, ord: Ordering) -> usize {+    match delta > 0 {+        true => x.fetch_add(delta as usize, ord),+        false => x.fetch_sub(-delta as usize, ord),+    }+}++#[derive(Clone, Copy)]+pub(crate) struct Offsets {+    write: u32,+    read: u32,+    len: u32,+}++impl Offsets {+    pub(crate) fn new(offsets: usize, len: u32) -> Self {+        Self {+            write: (offsets >> 32) as u32,+            read: (offsets & ((1 << 32) - 1)) as u32,

Unnecessary bitwise AND; the type cast will cover it.

mzohreva

comment created time in 12 days

Pull request review commentfortanix/rust-sgx

Implement async queues

+/* Copyright (c) Fortanix, Inc.+ *+ * This Source Code Form is subject to the terms of the Mozilla Public+ * License, v. 2.0. If a copy of the MPL was not distributed with this+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */++use fortanix_sgx_abi::FifoDescriptor;+use std::future::Future;+use std::pin::Pin;+use std::sync::atomic::AtomicUsize;++mod fifo;+mod interface_sync;+mod interface_async;+#[cfg(test)]+mod test_support;++/// A FIFO queue implemented according to [fortanix_sgx_abi specifications].+///+/// **NOTE:** Sender and reciever types use FifoDescriptor internally which+/// does not hold a reference to the Fifo instance, therefore users of these+/// types must ensure that the Fifo instance lives at least as long as all+/// senders and receivers for that queue.+///+/// **NOTE:** sync and async sender/receiver types should not be used together.+/// i.e. either use sync senders/receivers or the async ones, but don't mix+/// sync and async. The interfaces are designed for use in SGX enclaves (sync)+/// and enclave runner (async).+///+/// [fortanix_sgx_abi specifications]: https://edp.fortanix.com/docs/api/fortanix_sgx_abi/async/struct.FifoDescriptor.html+pub struct Fifo<T> {+    data: Box<[T]>,+    offsets: Box<AtomicUsize>,+}++/// This is used as a bound on `T` in `Fifo<T>` and related types.+/// Types that implement this trait must have an `id: AtomicU64` field and use+/// `Ordering::SeqCst` in `get_id()` and `set_id()`.+pub trait WithAtomicId {+    /// Must set the `id` field to 0.+    fn empty() -> Self;+    fn get_id(&self) -> u64;+    fn set_id(&mut self, id: u64);+    /// Copy everything except the `id` field from another instance to self.+    fn copy_except_id(&mut self, from: &Self);+}++#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]+pub enum QueueEvent {+    NotEmpty,+    NotFull,+}++#[derive(Debug, PartialEq, Eq)]+pub enum TrySendError {+    QueueFull,+}++#[derive(Debug, PartialEq, Eq)]+pub enum TryRecvError {+    
QueueEmpty,+}

Shouldn't these have a Closed variant?

mzohreva

comment created time in 12 days

PullRequestReviewEvent

push eventfortanix/rust-sgx

Jethro Beekman

commit sha 128278ed7426ca3b3ad409c84a16e128279c9c10

Support different API families for the Linux driver loader

view details

Jethro Beekman

commit sha 596434eb1b380ca01567b65819d44e08a08fffbb

Add crawler for finding all Linux SGX driver releases published by Intel

view details

Jethro Beekman

commit sha cf968bb9b04de5fa71b7f5cc5d125d65e2db1309

Add script to test loading enclaves with different Linux driver versions

view details

push time in 12 days

PR opened Azure/azure-policy

Reviewers
Update listOfAllowedimagePublisher README
+5 -5

0 comment

1 changed file

pr created time in 12 days

push eventjethrogb/azure-policy

jethrogb

commit sha 094493738e9870294679a2c33389a7d7b0c57e36

Update listOfAllowedimagePublisher README

view details

push time in 12 days

fork jethrogb/azure-policy

Repository for Azure Resource Policy built-in definitions and samples

fork in 12 days

push eventjethrogb/rust

Jethro Beekman

commit sha 0122e08dae79c2320ce0a9ce60a48ec35f970aff

Update stdarch

view details

push time in 13 days

pull request commentrust-lang/rust

Update stdarch

@rustbot modify labels: -S-waiting-on-author +S-waiting-on-review

jethrogb

comment created time in 13 days

push eventjethrogb/rust

Ivan Tham

commit sha 2b7f87b5fa43336ed1237747f60fd9095a41ea3d

Liballoc tweak use *const T instead of *const i8 *const T is also used in the same parts and also used for arith_offset.

view details

Ivan Tham

commit sha cc0d6345500932e8118ba65e98944a6a3bac3277

Liballoc IntoIter limit unsafe to pointer arithmethic

view details

Ivan Tham

commit sha 50315238aa8ffae08f29b260aa36511e03b5e070

Liballoc DoubleEndedIterator limit unsafe to pointer arithmethic

view details

Tim Nielens

commit sha 2ecc2ac864739cff6aed2609021e2467dedb117a

unit-arg - improve suggestion

view details

Yoshua Wuyts

commit sha 688f4471fd553c83ae3ff0306956d89b7d7c2d28

Stabilize future readiness fns

view details

Eduardo Broto

commit sha baf62e7a38854ff6a0039ddccb124ff329a32143

Update changelog to beta 1.47

view details

Tim Nielens

commit sha f3ccbef2af24d5d83f82f1fb50bd97a9b75e609f

unit-arg - pr comments

view details

Hirochika Matsumoto

commit sha 5574182b4d2d08c848a88a1ac5633fc194e0465e

Add a new lint to prevent `create_dir` from being used

view details

Hirochika Matsumoto

commit sha 607905d126c55422668007737c22d7a7a89c0d57

Add STD_FS_CREATE_DIR into paths

view details

Hirochika Matsumoto

commit sha 34e302e67c08c9b97d58d062ea83cc1fd860c56e

Fix clippy error

view details

Hirochika Matsumoto

commit sha eebd2483654456e332d7cf53218b56b9cbd6f2f5

Fix errors

view details

bors

commit sha c88c6149415dd47b5f05e69d7307e0a1967c33f2

Auto merge of #5970 - ebroto:changelog_beta_1_47, r=flip1995 Update changelog to beta 1.47 [Rendered](https://github.com/ebroto/rust-clippy/blob/changelog_beta_1_47/CHANGELOG.md) changelog: none

view details

flip1995

commit sha 282c59820b8e1d8c76f440484b81a190c576f91b

Merge commit '3d0b0e66afdfaa519d8855b338b35b4605775945' into clippyup

view details

Eduardo Broto

commit sha 7a66e6502dc3c7085b3f4078c01d4957d96175ed

or_fn_call: ignore nullary associated const fns

view details

Hirochika Matsumoto

commit sha 5b7590f841974255f74c64d573189aecc7a30b2e

Downgrade applicability of `create_dir`

view details

Kyle Huey

commit sha 4972989b616cbf96c015cd9fdf1f4b4464ecaace

Add a lint for an async block/closure that yields a type that is itself awaitable. This catches bugs of the form tokio::spawn(async move { let f = some_async_thing(); f // Oh no I forgot to await f so that work will never complete. });

view details

Kyle Huey

commit sha c1d2b9376a6bb4fc06f845e12b9c2a93079bb2ee

Add a test for an async function.

view details

Kyle Huey

commit sha 04912ca115ff153a97d80b604435b10dcb155dd0

Formatting changes requested by ThibsG.

view details

Dylan MacKenzie

commit sha 1c5b0fbe53f842cd5871ea02e4e48571615d5679

Update dataflow analyses to use new interface

view details

Camelid

commit sha 17b2ba5ded12f59dba63ece659b5cd714b763800

Syntax-highlight `single_char_push_str` lint

view details

push time in 13 days

pull request commentrust-lang/rust

Update stdarch

Actually, please cancel this build. The upstream PR wasn't merged with a merge commit, so the commit hash is not valid.

jethrogb

comment created time in 13 days

pull request commentrust-lang/llvm-project

Pull some upstream patches related to LVI mitigations compile time

Ping for review?

jethrogb

comment created time in 13 days

pull request commentrust-lang/rust

Consolidate some duplicate code in the sys modules.

error: couldn't read library/std/src/sys/wasm/../unsupported/path.rs: No such file or directory (os error 2)
  --> library/std/src/sys/wasm/mod.rs:31:1
   |
31 | pub mod path;
   | ^^^^^^^^^^^^^
ehuss

comment created time in 13 days

pull request commentrust-lang/rust

Improve SGX RWLock initializer test

I don't mind this being merged but the point was to get a review from someone who understands uninitialized memory. ¯\_(ツ)_/¯

jethrogb

comment created time in 13 days

PR opened fortanix/rust-sgx

Reviewers
Update libstd URL in README
+1 -1

0 comment

1 changed file

pr created time in 14 days

create branch fortanix/rust-sgx

branch : jethrogb-patch-1

created branch time in 14 days

Pull request review commentfortanix/rust-sgx

Support different API families for the Linux driver loader

+#!/bin/bash -e+#+# Prior to running this script, make sure the system has the following+# configuration:+#+# * Linux: 4.15 generic kernel (e.g. not the Azure kernel)+# * Test dependencies: build-essential dkms docker.io+# * EDP dependencies: pkg-config libssl-dev protobuf-compiler+# * Rust: latest nightly installed with x86_64-fortanix-unknown-sgx target+# * No other SGX drivers or software installed (from any vendor)

Build log on a properly prepared Azure VM: test-output.txt

jethrogb

comment created time in 14 days

PullRequestReviewEvent

push eventfortanix/rust-sgx

Jethro Beekman

commit sha be9b1ce0497692e518c270a4a6247f77aa638545

Add script to test loading enclaves with different Linux driver versions

view details

push time in 14 days

PR opened fortanix/rust-sgx

Reviewers
Support different API families for the Linux driver loader
+2901 -1125

0 comment

23 changed files

pr created time in 14 days

push eventfortanix/rust-sgx

Jethro Beekman

commit sha c3de8a07c625c0a9bc576622be4357c220215a10

Support different API families for the Linux driver loader

view details

Jethro Beekman

commit sha b5b8221685af576b93ea20707dc5980d629d609b

Add crawler for finding all Linux SGX driver releases published by Intel

view details

Jethro Beekman

commit sha dd029ab9dd1a015ea58c64a3e2af439d21f76609

Add script to test loading enclaves with different Linux driver versions

view details

push time in 14 days

push eventfortanix/rust-sgx

Jethro Beekman

commit sha 745ffadd2a961373510da9f8ff39ebffb77c737e

Update Cargo.lock format

view details

Jethro Beekman

commit sha f2c3e614ccc6d98fa2671a48992baaee9fd2f833

Update sgxs-loaders to 2018 edition

view details

Jethro Beekman

commit sha 20037294de3ecd46d2c2ef6d7dda67275af5b60b

Add test for sgxs-loaders

view details

Jethro Beekman

commit sha c3de8a07c625c0a9bc576622be4357c220215a10

Support different API families for the Linux driver loader

view details

Jethro Beekman

commit sha b5b8221685af576b93ea20707dc5980d629d609b

Add crawler for finding all Linux SGX driver releases published by Intel

view details

Jethro Beekman

commit sha dd029ab9dd1a015ea58c64a3e2af439d21f76609

Add script to test loading enclaves with different Linux driver versions

view details

Jethro Beekman

commit sha b9c29351b1291eb133e2a54d179784445b9ce665

Add support for SGX entry VDSO

view details

push time in 14 days

create branch fortanix/rust-sgx

branch : more-drivers

created branch time in 14 days

issue commentkubernetes/kubernetes

Lack of documentation for ImagePullBackOff timing

/remove-lifecycle stale

alexpirine

comment created time in 14 days

PR opened rust-lang/rust

Fix SGX test suite

This was broken in #76120

The definitive fix is #76278 but that's not being reviewed.

+1 -1

0 comment

1 changed file

pr created time in 14 days

create branch jethrogb/rust

branch : jb/sgx-rwlock-init-test2

created branch time in 14 days

issue commentcrossbeam-rs/crossbeam

ArcCell not lock-free

https://github.com/crossbeam-rs/crossbeam/pull/564

jethrogb

comment created time in 19 days

pull request commentrust-lang/rust

Improve SGX RWLock initializer test

Ping for review?

jethrogb

comment created time in 20 days

PR opened rust-lang/llvm-project

Pull some upstream patches related to LVI mitigations compile time

Cherry-pick:

  • ec1445c5afda7f145a414f11c9103c87a4c1823f
  • 09897b146a8a7cb934279e630c0027d5d4de7399
  • 47fe1b63f449827e1171f944b07d0cbebad1de63
  • f0f467aeecfc615a5055d8f2edd903996c11727e
  • 4b25f672998fde5cc5bb02411e9268b2bb35655f
  • 9521704553e8a330cfdf5a0611885680073178b2

This (partially?) addresses https://github.com/rust-lang/rust/issues/74632

+277 -167

0 comment

7 changed files

pr created time in 20 days

create branch fortanix/llvm-project

branch : jb/lvi-ballooning-fix

created branch time in 20 days

more