From 81097cdc265e005e5763f9665f4ad86b13046f72 Mon Sep 17 00:00:00 2001 From: Arthur Peters Date: Tue, 7 Apr 2026 10:46:21 -0500 Subject: [PATCH 1/3] Add support for selecting a subset of tests in LTP and gVisor --- test/syscall_test/gvisor/run_gvisor_test.sh | 23 ++++++++++++++++++++- test/syscall_test/ltp/run_ltp_test.sh | 22 +++++++++++++++++++- test/syscall_test/run_syscall_test.sh | 4 ++-- 3 files changed, 45 insertions(+), 4 deletions(-) diff --git a/test/syscall_test/gvisor/run_gvisor_test.sh b/test/syscall_test/gvisor/run_gvisor_test.sh index 244458887..a01b3253d 100755 --- a/test/syscall_test/gvisor/run_gvisor_test.sh +++ b/test/syscall_test/gvisor/run_gvisor_test.sh @@ -2,6 +2,20 @@ # SPDX-License-Identifier: MPL-2.0 +# Run gVisor syscall tests. +# +# Usage: run_gvisor_test.sh [TEST_NAME...] +# +# If no arguments are given, all tests not on the blocklist are run. If one or more TEST_NAME +# arguments are given, only those test binaries are run (each argument is an exact binary name, e.g. +# "futex_test"). They are run with the same configuration as this script otherwise would, so the +# configuration (e.g., `test/syscall_test/gvisor/blocklists/futex_test`) will still apply. +# +# To run a subset of tests via the `make run`, pass the test names through INITARGS: +# +# make run AUTO_TEST=syscall SYSCALL_TEST_SUITE=gvisor INITARGS="futex_test" +# make run AUTO_TEST=syscall SYSCALL_TEST_SUITE=gvisor INITARGS="futex_test socket_test" + SCRIPT_DIR=$(dirname "$0") TEST_TMP_DIR=${SYSCALL_TEST_WORKDIR:-/tmp} TEST_BIN_DIR=$SCRIPT_DIR/tests @@ -53,7 +67,14 @@ run_one_test(){ rm -f $FAIL_CASES && touch $FAIL_CASES rm -rf $TEST_TMP_DIR/* -for syscall_test in $(find $TEST_BIN_DIR/. -name \*_test) ; do +# Create test list based on arguments +if [ $# -gt 0 ]; then + tests="$*" +else + tests="$(find $TEST_BIN_DIR/. -name \*_test)" +fi + +for syscall_test in "$tests" ; do test_name=$(basename "$syscall_test") run_one_test $test_name if [ $? -eq 0 ] && PASSED_TESTS=$((PASSED_TESTS+1));then diff --git a/test/syscall_test/ltp/run_ltp_test.sh b/test/syscall_test/ltp/run_ltp_test.sh index 91599b87b..c1119958a 100755 --- a/test/syscall_test/ltp/run_ltp_test.sh +++ b/test/syscall_test/ltp/run_ltp_test.sh @@ -2,13 +2,33 @@ # SPDX-License-Identifier: MPL-2.0 +# Run LTP syscall tests. +# +# Usage: run_ltp_test.sh [TEST_PATTERN...] +# +# If no arguments are given, all tests not on the blocklist are run. If one or more TEST_PATTERN +# arguments are given, only tests whose names match any of the given patterns are run. Each +# argument is treated as a basic regular expression matched against the test name. +# +# To run a subset of tests via the top-level make target, pass the patterns through INITARGS: +# +# make run AUTO_TEST=syscall SYSCALL_TEST_SUITE=ltp INITARGS="futex" +# make run AUTO_TEST=syscall SYSCALL_TEST_SUITE=ltp INITARGS="futex mmap" + LTP_DIR=$(dirname "$0") TEST_TMP_DIR=${SYSCALL_TEST_WORKDIR:-/tmp} LOG_FILE=$TEST_TMP_DIR/result.log RESULT=0 rm -f $LOG_FILE -CREATE_ENTRIES=1 $LTP_DIR/runltp -f syscalls -p -d $TEST_TMP_DIR -l $LOG_FILE + +EXTRA_ARGS= +if [ $# -gt 0 ]; then + PATTERN=$(echo "$@" | tr ' ' '\|') + EXTRA_ARGS="-s '$PATTERN'" +else + +CREATE_ENTRIES=1 $LTP_DIR/runltp -f syscalls -p -d $TEST_TMP_DIR -l $LOG_FILE $EXTRA_ARGS if [ $? -ne 0 ]; then RESULT=1 fi diff --git a/test/syscall_test/run_syscall_test.sh b/test/syscall_test/run_syscall_test.sh index ec59a3931..43418af88 100755 --- a/test/syscall_test/run_syscall_test.sh +++ b/test/syscall_test/run_syscall_test.sh @@ -10,13 +10,13 @@ GVISOR_DIR=/opt/gvisor if [ "${SYSCALL_TEST_SUITE}" == "ltp" ]; then echo "Running LTP syscall tests..." - if ! "${LTP_DIR}/run_ltp_test.sh"; then + if ! "${LTP_DIR}/run_ltp_test.sh" "$@"; then echo "Error: LTP syscall tests failed." >&2 exit 1 fi elif [ "${SYSCALL_TEST_SUITE}" == "gvisor" ]; then echo "Running gVisor syscall tests..." - if ! "${GVISOR_DIR}/run_gvisor_test.sh"; then + if ! "${GVISOR_DIR}/run_gvisor_test.sh" "$@"; then echo "Error: gVisor syscall tests failed." >&2 exit 2 fi From ecfdb26fdcf7b96302dacdbf454c0a5bdaf82d8b Mon Sep 17 00:00:00 2001 From: Arthur Peters Date: Fri, 3 Apr 2026 20:00:30 +0000 Subject: [PATCH 2/3] Add atomic operations for VM readers/writers Cherry-picked from e4fafb13b16b6b573a973ec163b466a1c96f3a21 Co-Authored-By: Claude Sonnet 4.6 --- kernel/src/context.rs | 46 ++++- kernel/src/process/posix_thread/futex.rs | 40 +++-- ostd/src/arch/riscv/mm/mod.rs | 28 +-- .../src/arch/x86/mm/atomic_cmpxchg_fallible.S | 23 +++ ostd/src/arch/x86/mm/atomic_load_fallible.S | 22 +++ ostd/src/arch/x86/mm/mod.rs | 4 +- ostd/src/arch/x86/mm/util.rs | 12 ++ ostd/src/mm/io.rs | 159 +++++++++++++++++- ostd/src/mm/mod.rs | 4 +- ostd/src/mm/test.rs | 51 ++++++ 10 files changed, 358 insertions(+), 31 deletions(-) create mode 100644 ostd/src/arch/x86/mm/atomic_cmpxchg_fallible.S create mode 100644 ostd/src/arch/x86/mm/atomic_load_fallible.S diff --git a/kernel/src/context.rs b/kernel/src/context.rs index 50abe1c8b..0fdde0cef 100644 --- a/kernel/src/context.rs +++ b/kernel/src/context.rs @@ -6,7 +6,7 @@ use core::{cell::Ref, mem}; use aster_rights::Full; use ostd::{ - mm::{Fallible, Infallible, VmReader, VmWriter}, + mm::{Fallible, Infallible, PodAtomic, VmReader, VmWriter}, task::{CurrentTask, Task}, }; @@ -152,6 +152,48 @@ impl<'a> CurrentUserSpace<'a> { Ok(user_writer.write_val(val)?) } + /// Atomically loads a `PodAtomic` value with [`Ordering::Relaxed`] semantics. + /// + /// # Panics + /// + /// This method will panic if `vaddr` is not aligned on a `core::mem::align_of::()`-byte + /// boundary. + /// + /// [`Ordering::Relaxed`]: core::sync::atomic::Ordering::Relaxed + pub fn atomic_load(&self, vaddr: Vaddr) -> Result { + check_vaddr(vaddr)?; + + let user_reader = self.reader(vaddr, core::mem::size_of::())?; + Ok(user_reader.atomic_load()?) + } + + /// Atomically updates a `PodAtomic` value with [`Ordering::Relaxed`] semantics. + /// + /// This method internally uses an atomic compare-and-exchange operation.If the value changes + /// concurrently, this method will retry so the operation may be performed multiple times. + /// + /// # Panics + /// + /// This method will panic if `vaddr` is not aligned on a `core::mem::align_of::()`-byte + /// boundary. + /// + /// [`Ordering::Relaxed`]: core::sync::atomic::Ordering::Relaxed + pub fn atomic_update(&self, vaddr: Vaddr, op: impl Fn(T) -> T) -> Result + where + T: PodAtomic + Eq, + { + check_vaddr(vaddr)?; + + let user_reader = self.reader(vaddr, core::mem::size_of::())?; + let mut user_writer = self.writer(vaddr, core::mem::size_of::())?; + loop { + match user_writer.atomic_update(&user_reader, &op)? { + (old_val, true) => return Ok(old_val), + (_, false) => continue, + } + } + } + /// Reads a C string from the user space of the current process. /// The length of the string should not exceed `max_len`, /// including the final `\0` byte. @@ -313,7 +355,7 @@ fn check_vaddr(va: Vaddr) -> Result<()> { if va < crate::vm::vmar::ROOT_VMAR_LOWEST_ADDR { Err(Error::with_message( Errno::EFAULT, - "Bad user space pointer specified", + "the userspace address is too small", )) } else { Ok(()) diff --git a/kernel/src/process/posix_thread/futex.rs b/kernel/src/process/posix_thread/futex.rs index 99015fb16..941991232 100644 --- a/kernel/src/process/posix_thread/futex.rs +++ b/kernel/src/process/posix_thread/futex.rs @@ -51,7 +51,7 @@ pub fn futex_wait_bitset( return_errno_with_message!(Errno::EINVAL, "at least one bit should be set"); } - let futex_key = FutexKey::new(futex_addr, bitset, pid); + let futex_key = FutexKey::new(futex_addr, bitset, pid)?; let (futex_item, waiter) = FutexItem::create(futex_key); let (_, futex_bucket_ref) = get_futex_bucket(futex_key); @@ -110,7 +110,7 @@ pub fn futex_wake_bitset( return_errno_with_message!(Errno::EINVAL, "at least one bit should be set"); } - let futex_key = FutexKey::new(futex_addr, bitset, pid); + let futex_key = FutexKey::new(futex_addr, bitset, pid)?; let (_, futex_bucket_ref) = get_futex_bucket(futex_key); let mut futex_bucket = futex_bucket_ref.lock(); let res = futex_bucket.remove_and_wake_items(futex_key, max_count); @@ -238,8 +238,8 @@ pub fn futex_wake_op( ) -> Result { let wake_op = FutexWakeOpEncode::from_u32(wake_op_bits)?; - let futex_key_1 = FutexKey::new(futex_addr_1, FUTEX_BITSET_MATCH_ANY, pid); - let futex_key_2 = FutexKey::new(futex_addr_2, FUTEX_BITSET_MATCH_ANY, pid); + let futex_key_1 = FutexKey::new(futex_addr_1, FUTEX_BITSET_MATCH_ANY, pid)?; + let futex_key_2 = FutexKey::new(futex_addr_2, FUTEX_BITSET_MATCH_ANY, pid)?; let (index_1, futex_bucket_ref_1) = get_futex_bucket(futex_key_1); let (index_2, futex_bucket_ref_2) = get_futex_bucket(futex_key_2); @@ -258,10 +258,9 @@ pub fn futex_wake_op( } }; - // FIXME: This should be an atomic read-modify-write memory access here. - let old_val = ctx.user_space().read_val(futex_addr_2)?; - let new_val = wake_op.calculate_new_val(old_val); - ctx.user_space().write_val(futex_addr_2, &new_val)?; + let old_val = ctx + .user_space() + .atomic_update::(futex_addr_2, |val| wake_op.calculate_new_val(val))?; let mut res = futex_bucket_1.remove_and_wake_items(futex_key_1, max_count_1); if wake_op.should_wake(old_val) { @@ -284,8 +283,8 @@ pub fn futex_requeue( return futex_wake(futex_addr, max_nwakes, pid); } - let futex_key = FutexKey::new(futex_addr, FUTEX_BITSET_MATCH_ANY, pid); - let futex_new_key = FutexKey::new(futex_new_addr, FUTEX_BITSET_MATCH_ANY, pid); + let futex_key = FutexKey::new(futex_addr, FUTEX_BITSET_MATCH_ANY, pid)?; + let futex_new_key = FutexKey::new(futex_new_addr, FUTEX_BITSET_MATCH_ANY, pid)?; let (bucket_idx, futex_bucket_ref) = get_futex_bucket(futex_key); let (new_bucket_idx, futex_new_bucket_ref) = get_futex_bucket(futex_new_key); @@ -472,14 +471,25 @@ struct FutexKey { } impl FutexKey { - pub fn new(addr: Vaddr, bitset: FutexBitSet, pid: Option) -> Self { - Self { addr, bitset, pid } + pub fn new(addr: Vaddr, bitset: FutexBitSet, pid: Option) -> Result { + // "On all platforms, futexes are four-byte integers that must be aligned on a four-byte + // boundary." + // Reference: . + if addr % core::mem::align_of::() != 0 { + return_errno_with_message!( + Errno::EINVAL, + "the futex word is not aligend on a four-byte boundary" + ); + } + + Ok(Self { addr, bitset, pid }) } pub fn load_val(&self, ctx: &Context) -> Result { - // FIXME: how to implement a atomic load? - warn!("implement an atomic load"); - ctx.user_space().read_val(self.addr) + Ok(ctx + .user_space() + .atomic_load::(self.addr)? + .cast_signed()) } pub fn addr(&self) -> Vaddr { diff --git a/ostd/src/arch/riscv/mm/mod.rs b/ostd/src/arch/riscv/mm/mod.rs index 99acb3248..702976f08 100644 --- a/ostd/src/arch/riscv/mm/mod.rs +++ b/ostd/src/arch/riscv/mm/mod.rs @@ -225,20 +225,28 @@ impl fmt::Debug for PageTableEntry { } } -pub(crate) fn __memcpy_fallible(dst: *mut u8, src: *const u8, size: usize) -> usize { - // TODO: implement fallible - unsafe { - riscv::register::sstatus::set_sum(); - } +pub(crate) unsafe fn __memcpy_fallible(dst: *mut u8, src: *const u8, size: usize) -> usize { + // TODO: Implement this fallible operation. + unsafe { riscv::register::sstatus::set_sum() }; unsafe { core::ptr::copy(src, dst, size) }; 0 } -pub(crate) fn __memset_fallible(dst: *mut u8, value: u8, size: usize) -> usize { - // TODO: implement fallible - unsafe { - riscv::register::sstatus::set_sum(); - } +pub(crate) unsafe fn __memset_fallible(dst: *mut u8, value: u8, size: usize) -> usize { + // TODO: Implement this fallible operation. + unsafe { riscv::register::sstatus::set_sum() }; unsafe { core::ptr::write_bytes(dst, value, size) }; 0 } + +pub(crate) unsafe fn __atomic_load_fallible(ptr: *const u32) -> u64 { + // TODO: Implement this fallible operation. + unsafe { riscv::register::sstatus::set_sum() }; + unsafe { core::intrinsics::atomic_load_relaxed(ptr) as u64 } +} + +pub(crate) unsafe fn __atomic_cmpxchg_fallible(ptr: *mut u32, old_val: u32, new_val: u32) -> u64 { + // TODO: Implement this fallible operation. + unsafe { riscv::register::sstatus::set_sum() }; + unsafe { core::intrinsics::atomic_cxchg_relaxed_relaxed(ptr, old_val, new_val).0 as u64 } +} diff --git a/ostd/src/arch/x86/mm/atomic_cmpxchg_fallible.S b/ostd/src/arch/x86/mm/atomic_cmpxchg_fallible.S new file mode 100644 index 000000000..259391934 --- /dev/null +++ b/ostd/src/arch/x86/mm/atomic_cmpxchg_fallible.S @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: MPL-2.0 */ + +// Atomically compares and exchanges a 32-bit integer value. This function works with +// exception handling and can recover from a page fault. +// +// Returns the previous value or `!0u64` if failed to update. +.text +.global __atomic_cmpxchg_fallible +.code64 +__atomic_cmpxchg_fallible: # (ptr: *mut u32, old_val: u32, new_val: u32) -> u64 + mov eax, esi +.cmpxchg: + lock; cmpxchg [rdi], edx + ret +.cmpxchg_fault: + mov rax, -1 + ret + +.pushsection .ex_table, "a" + .align 8 + .quad [.cmpxchg] + .quad [.cmpxchg_fault] +.popsection diff --git a/ostd/src/arch/x86/mm/atomic_load_fallible.S b/ostd/src/arch/x86/mm/atomic_load_fallible.S new file mode 100644 index 000000000..3870abc42 --- /dev/null +++ b/ostd/src/arch/x86/mm/atomic_load_fallible.S @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MPL-2.0 */ + +// Atomically loads a 32-bit integer value. This function works with exception handling +// and can recover from a page fault. +// +// Returns the loaded value or `!0u64` if failed to load. +.text +.global __atomic_load_fallible +.code64 +__atomic_load_fallible: # (ptr: *const u32) -> u64; +.load: + mov eax, [rdi] + ret +.load_fault: + mov rax, -1 + ret + +.pushsection .ex_table, "a" + .align 8 + .quad [.load] + .quad [.load_fault] +.popsection diff --git a/ostd/src/arch/x86/mm/mod.rs b/ostd/src/arch/x86/mm/mod.rs index 4244172c5..6363fc12e 100644 --- a/ostd/src/arch/x86/mm/mod.rs +++ b/ostd/src/arch/x86/mm/mod.rs @@ -6,7 +6,9 @@ use alloc::fmt; use core::ops::Range; use cfg_if::cfg_if; -pub(crate) use util::{__memcpy_fallible, __memset_fallible}; +pub(crate) use util::{ + __atomic_cmpxchg_fallible, __atomic_load_fallible, __memcpy_fallible, __memset_fallible, +}; use x86_64::{VirtAddr, instructions::tlb, structures::paging::PhysFrame}; use crate::{ diff --git a/ostd/src/arch/x86/mm/util.rs b/ostd/src/arch/x86/mm/util.rs index fc1098a1b..08777ad49 100644 --- a/ostd/src/arch/x86/mm/util.rs +++ b/ostd/src/arch/x86/mm/util.rs @@ -3,6 +3,9 @@ core::arch::global_asm!(include_str!("memcpy_fallible.S")); core::arch::global_asm!(include_str!("memset_fallible.S")); +core::arch::global_asm!(include_str!("atomic_load_fallible.S")); +core::arch::global_asm!(include_str!("atomic_cmpxchg_fallible.S")); + unsafe extern "C" { /// Copies `size` bytes from `src` to `dst`. This function works with exception handling /// and can recover from page fault. @@ -12,4 +15,13 @@ unsafe extern "C" { /// This function works with exception handling and can recover from page fault. /// Returns number of bytes that failed to set. pub(crate) fn __memset_fallible(dst: *mut u8, value: u8, size: usize) -> usize; + + /// Atomically loads a 32-bit integer value. This function works with exception handling + /// and can recover from page fault. + /// Returns the loaded value or `!0u64` if failed to load. + pub(crate) fn __atomic_load_fallible(ptr: *const u32) -> u64; + /// Atomically compares and exchanges a 32-bit integer value. This function works with + /// exception handling and can recover from page fault. + /// Returns the previous value or `!0u64` if failed to update. + pub(crate) fn __atomic_cmpxchg_fallible(ptr: *mut u32, old_val: u32, new_val: u32) -> u64; } diff --git a/ostd/src/mm/io.rs b/ostd/src/mm/io.rs index 6b8bf65ca..ecc0520bd 100644 --- a/ostd/src/mm/io.rs +++ b/ostd/src/mm/io.rs @@ -48,7 +48,9 @@ use inherit_methods_macro::inherit_methods; use crate::{ Error, Pod, - arch::mm::{__memcpy_fallible, __memset_fallible}, + arch::mm::{ + __atomic_cmpxchg_fallible, __atomic_load_fallible, __memcpy_fallible, __memset_fallible, + }, error::{InvalidArgsSnafu, PageFaultSnafu}, mm::{ MAX_USERSPACE_VADDR, @@ -551,6 +553,9 @@ impl<'a> VmReader<'a, Infallible> { Ok(val) } + // Currently, there are no volatile atomic operations in `core::intrinsics`. Therefore, we do + // not provide an infallible implementation of `VmReader::atomic_load`. + /// Converts to a fallible reader. pub fn to_fallible(self) -> VmReader<'a, Fallible> { // It is safe to construct a fallible reader since an infallible reader covers the @@ -626,6 +631,37 @@ impl VmReader<'_, Fallible> { })?; Ok(buf) } + + /// Atomically loads a `PodAtomic` value. + /// + /// Regardless of whether it is successful, the cursor of the reader will not move. + /// + /// This method only guarantees the atomicity of the specific operation. There are no + /// synchronization constraints on other memory accesses. This aligns with the [Relaxed + /// ordering](https://en.cppreference.com/w/cpp/atomic/memory_order.html#Relaxed_ordering) + /// specified in the C++11 memory model. + /// + /// This method will fail with errors if + /// 1. the remaining space of the reader is less than `core::mem::size_of::()` bytes, or + /// 2. the memory operation fails due to an unresolvable page fault. + /// + /// # Panics + /// + /// This method will panic if the memory location is not aligned on a + /// `core::mem::align_of::()`-byte boundary. + pub fn atomic_load(&self) -> Result { + if self.remain() < core::mem::size_of::() { + return InvalidArgsSnafu.fail(); + } + + let cursor = self.cursor.cast::(); + assert!(cursor.is_aligned()); + + // SAFETY: + // 1. The cursor is either valid for reading or in user space for `size_of::()` bytes. + // 2. The cursor is aligned on a `align_of::()`-byte boundary. + unsafe { T::atomic_load_fallible(cursor) } + } } impl VmReader<'_, Fallibility> { @@ -775,6 +811,9 @@ impl<'a> VmWriter<'a, Infallible> { Ok(()) } + // Currently, there are no volatile atomic operations in `core::intrinsics`. Therefore, we do + // not provide an infallible implementation of `VmWriter::atomic_update`. + /// Fills the available space by repeating `value`. /// /// Returns the number of values written. @@ -864,6 +903,67 @@ impl VmWriter<'_, Fallible> { Ok(()) } + /// Atomically updates a `PodAtomic` value. + /// + /// This is implemented by performing an atomic load, applying the operation, and performing an + /// atomic compare-and-exchange. So this cannot prevent the [ABA + /// problem](https://en.wikipedia.org/wiki/ABA_problem). + /// + /// The caller is required to provide a reader which points to the exactly same memory location + /// to ensure that reading from the memory is allowed. + /// + /// On success, the previous value will be returned with a boolean value denoting whether the + /// compare-and-exchange succeeds. The caller usually wants to retry if the flag is false. + /// + /// Regardless of whether it is successful, the cursor of the reader and writer will not move. + /// + /// This method only guarantees the atomicity of the specific operation. There are no + /// synchronization constraints on other memory accesses. This aligns with the [Relaxed + /// ordering](https://en.cppreference.com/w/cpp/atomic/memory_order.html#Relaxed_ordering) + /// specified in the C++11 memory model. + /// + /// This method will fail with errors if: + /// 1. the remaining (avail) space of the reader (writer) is less than + /// `core::mem::size_of::()` bytes, or + /// 2. the memory operation fails due to an unresolvable page fault. + /// + /// # Panics + /// + /// This method will panic if: + /// 1. the reader and the writer does not point to the same memory location, or + /// 2. the memory location is not aligned on a `core::mem::align_of::()`-byte boundary. + pub fn atomic_update( + &mut self, + reader: &VmReader, + op: impl FnOnce(T) -> T, + ) -> Result<(T, bool)> + where + T: PodAtomic + Eq, + { + if self.avail() < core::mem::size_of::() || reader.remain() < core::mem::size_of::() { + return InvalidArgsSnafu.fail(); + } + + assert_eq!(self.cursor.cast_const(), reader.cursor); + + let cursor = self.cursor.cast::(); + assert!(cursor.is_aligned()); + + // SAFETY: + // 1. The cursor is either valid for reading or in user space for `size_of::()` bytes. + // 2. The cursor is aligned on a `align_of::()`-byte boundary. + let old_val = unsafe { T::atomic_load_fallible(cursor)? }; + + let new_val = op(old_val); + + // SAFETY: + // 1. The cursor is either valid for reading and writing or in user space for 4 bytes. + // 2. The cursor is aligned on a 4-byte boundary. + let cur_val = unsafe { T::atomic_cmpxchg_fallible(cursor, old_val, new_val)? }; + + Ok((old_val, old_val == cur_val)) + } + /// Writes `len` zeros to the target memory. /// /// This method attempts to fill up to `len` bytes with zeros. If the available @@ -977,3 +1077,60 @@ mod pod_once_impls { size == 1 || size == 2 || size == 4 || size == 8 } } + +/// A marker trait for POD types that can be read or written atomically. +pub trait PodAtomic: Pod { + /// Atomically loads a value. + /// This function will return errors if encountering an unresolvable page fault. + /// + /// Returns the loaded value. + /// + /// # Safety + /// + /// - `ptr` must either be [valid] for writes of `core::mem::size_of::()` bytes or be in user + /// space for `core::mem::size_of::()` bytes. + /// - `ptr` must be aligned on a `core::mem::align_of::()`-byte boundary. + /// + /// [valid]: crate::mm::io#safety + #[doc(hidden)] + unsafe fn atomic_load_fallible(ptr: *const Self) -> Result; + + /// Atomically compares and exchanges a value. + /// This function will return errors if encountering an unresolvable page fault. + /// + /// Returns the previous value. + /// `new_val` will be written if and only if the previous value is equal to `old_val`. + /// + /// # Safety + /// + /// - `ptr` must either be [valid] for writes of `core::mem::size_of::()` bytes or be in user + /// space for `core::mem::size_of::()` bytes. + /// - `ptr` must be aligned on a `core::mem::align_of::()`-byte boundary. + /// + /// [valid]: crate::mm::io#safety + #[doc(hidden)] + unsafe fn atomic_cmpxchg_fallible(ptr: *mut Self, old_val: Self, new_val: Self) + -> Result; +} + +impl PodAtomic for u32 { + unsafe fn atomic_load_fallible(ptr: *const Self) -> Result { + // SAFETY: The safety is upheld by the caller. + let result = unsafe { __atomic_load_fallible(ptr) }; + if result == !0 { + Err(PageFaultSnafu.build()) + } else { + Ok(result as Self) + } + } + + unsafe fn atomic_cmpxchg_fallible(ptr: *mut Self, old_val: Self, new_val: Self) -> Result { + // SAFETY: The safety is upheld by the caller. + let result = unsafe { __atomic_cmpxchg_fallible(ptr, old_val, new_val) }; + if result == !0 { + Err(PageFaultSnafu.build()) + } else { + Ok(result as Self) + } + } +} diff --git a/ostd/src/mm/mod.rs b/ostd/src/mm/mod.rs index fc0aea550..4ee2f4643 100644 --- a/ostd/src/mm/mod.rs +++ b/ostd/src/mm/mod.rs @@ -33,8 +33,8 @@ pub use self::{ untyped::{AnyUFrameMeta, UFrame, UntypedMem}, }, io::{ - Fallible, FallibleVmRead, FallibleVmWrite, Infallible, PodOnce, VmIo, VmIoOnce, VmReader, - VmWriter, + Fallible, FallibleVmRead, FallibleVmWrite, Infallible, PodAtomic, PodOnce, VmIo, VmIoOnce, + VmReader, VmWriter, }, page_prop::{CachePolicy, PageFlags, PageProperty}, vm_space::VmSpace, diff --git a/ostd/src/mm/test.rs b/ostd/src/mm/test.rs index 256ee113e..f4013d3f1 100644 --- a/ostd/src/mm/test.rs +++ b/ostd/src/mm/test.rs @@ -395,6 +395,57 @@ mod io { assert_eq!(result.unwrap(), vec![1, 2, 3]); } + /// Tests the `atomic_load` method in Fallible mode. + #[ktest] + fn atomic_load_fallible() { + let buffer = [1u8, 1, 1, 1, 2, 2, 2, 2]; + let reader = VmReader::from(&buffer[..]); + let mut reader_fallible = reader.to_fallible(); + + assert_eq!(reader_fallible.atomic_load::().unwrap(), 0x01010101); + reader_fallible.skip(4); + assert_eq!(reader_fallible.atomic_load::().unwrap(), 0x02020202); + } + + /// Tests the `atomic_update` method in Fallible mode. + #[ktest] + fn atomic_update_fallible() { + type Segment = crate::mm::Segment<()>; + + fn update(segment: &Segment, old_val: u32, new_val: u32, f: fn(segment: &Segment)) -> bool { + let (val, is_succ) = segment + .writer() + .to_fallible() + .skip(4) + .atomic_update(segment.reader().to_fallible().skip(4), |val| { + assert_eq!(val, old_val); + f(segment); + new_val + }) + .unwrap(); + assert_eq!(val, old_val); + is_succ + } + + let segment = FrameAllocOptions::new() + .zeroed(true) + .alloc_segment(1) + .unwrap(); + + assert!(update(&segment, 0, 100, |_| ())); + assert!(update(&segment, 100, 200, |_| ())); + + let is_succ = update(&segment, 200, 400, |segment| { + assert!(update(segment, 200, 300, |_| ())) + }); + assert!(!is_succ); + + let mut reader = segment.reader().to_fallible(); + reader.skip(4); + assert_eq!(reader.atomic_load::().unwrap(), 300); + assert_eq!(reader.read_val::().unwrap(), 300); + } + /// Tests the `fill_zeros` method in Fallible mode. #[ktest] fn fill_zeros_fallible() { From 436ff2dc2f47970b8cd85ce210bd317978f2cf0c Mon Sep 17 00:00:00 2001 From: Arthur Peters Date: Mon, 6 Apr 2026 11:57:38 -0500 Subject: [PATCH 3/3] Implement stubs for _pi operations --- kernel/src/context.rs | 25 ++ kernel/src/process/posix_thread/futex.rs | 243 +++++++++++++++++- kernel/src/syscall/futex.rs | 18 +- kernel/src/time/wait.rs | 6 + ostd/src/mm/io.rs | 46 ++++ .../syscall_test/gvisor/blocklists/futex_test | 10 +- 6 files changed, 339 insertions(+), 9 deletions(-) diff --git a/kernel/src/context.rs b/kernel/src/context.rs index 0fdde0cef..c95de81ac 100644 --- a/kernel/src/context.rs +++ b/kernel/src/context.rs @@ -194,6 +194,31 @@ impl<'a> CurrentUserSpace<'a> { } } + + /// Atomically compare and exchange a `PodAtomic` value with [`Ordering::Relaxed`] semantics. + /// + /// On success, the previous value will be returned. On failure, the actual value at the address + /// is returned. The cursor of the reader and writer will not move in either case. + /// + /// # Panics + /// + /// This method will panic if `vaddr` is not aligned on a `core::mem::align_of::()`-byte + /// boundary. + /// + /// [`Ordering::Relaxed`]: core::sync::atomic::Ordering::Relaxed + pub fn atomic_compare_exchange(&self, vaddr: Vaddr, old_val: T, + new_val: T,) -> Result + where + T: PodAtomic + Eq, + { + check_vaddr(vaddr)?; + + let user_reader = self.reader(vaddr, core::mem::size_of::())?; + let mut user_writer = self.writer(vaddr, core::mem::size_of::())?; + + Ok(user_writer.atomic_compare_exchange(&user_reader, old_val, new_val)?) + } + /// Reads a C string from the user space of the current process. /// The length of the string should not exceed `max_len`, /// including the final `\0` byte. diff --git a/kernel/src/process/posix_thread/futex.rs b/kernel/src/process/posix_thread/futex.rs index 941991232..55a365757 100644 --- a/kernel/src/process/posix_thread/futex.rs +++ b/kernel/src/process/posix_thread/futex.rs @@ -4,6 +4,7 @@ use int_to_c_enum::TryFromInt; use ostd::{ cpu::num_cpus, sync::{Waiter, Waker}, + task::Task, }; use spin::Once; @@ -15,6 +16,59 @@ const FUTEX_OP_MASK: u32 = 0x0000_000F; const FUTEX_FLAGS_MASK: u32 = 0xFFFF_FFF0; const FUTEX_BITSET_MATCH_ANY: FutexBitSet = 0xFFFF_FFFF; +/// If set, the locked futex has waiters. This must match `linux/futex.h`. +const FUTEX_WAITERS: u32 = 0x80000000; + +#[derive(Debug, Clone, Copy)] +pub struct PiFutexState { + bits: u32, +} + +impl PiFutexState { + /// Creates a new PiFutexState with the given tid (has_waiters is false by default) + pub fn from_tid(tid: u32) -> Self { + Self { + bits: tid & 0x7FFFFFFF, // Only use lower 31 bits for tid + } + } + + pub fn new(bits: u32) -> Self { + Self { bits } + } + + /// Returns whether waiters are present + pub fn has_waiters(&self) -> bool { + self.bits & FUTEX_WAITERS != 0 + } + + /// Sets the waiters flag + pub fn set_has_waiters(&mut self, has_waiters: bool) -> Self { + if has_waiters { + self.bits |= FUTEX_WAITERS; + } else { + self.bits &= !FUTEX_WAITERS; + } + *self + } + + /// Gets the thread ID (31 bits) + pub fn tid(&self) -> u32 { + self.bits & 0x7FFFFFFF + } + + /// Sets the thread ID (31 bits only) + pub fn set_tid(&mut self, tid: u32) -> Self { + self.bits = (tid & 0x7FFFFFFF) | (self.bits & FUTEX_WAITERS); + *self + } +} + +impl From for u32 { + fn from(state: PiFutexState) -> Self { + state.bits + } +} + /// do futex wait pub fn futex_wait( futex_addr: u64, @@ -89,6 +143,130 @@ pub fn futex_wait_bitset( // to exhaust kernel memory. } +/// Lock a futex with priority inheritance (PI). +/// +/// TODO(arthurp): This implementation is *WRONG*. +/// * It does not implement priority waking at all. +/// * It allows other threads to steal the lock even if a thread is waiting in the kernel. This is +/// solved with a yield loop, which is bad. +/// * Certainly other things. +pub fn futex_lock_pi( + futex_addr: Vaddr, + timeout: Option, + ctx: &Context, + pid: Option, + spin: bool, +) -> Result<()> { + let futex_key = FutexKey::new(futex_addr, FUTEX_BITSET_MATCH_ANY, pid)?; + let (futex_item, waiter) = FutexItem::create(futex_key); + + let (_, futex_bucket_ref) = get_futex_bucket(futex_key); + // lock futex bucket ref here to avoid data race + let mut futex_bucket = futex_bucket_ref.lock(); + + if futex_trylock_pi(futex_addr, ctx)? { + // Acquired lock + return Ok(()); + } + + futex_bucket.add_item(futex_item); + + // drop lock + drop(futex_bucket); + + let result = if spin { + Task::yield_now(); + Ok(()) + } else { + waiter.pause_timeout(&timeout.into()) + }; + match result { + // FIXME: If the futex is woken up and a signal comes at the same time, we should succeed + // instead of failing with `EINTR`. The code below is of course wrong, but was needed to + // make the gVisor tests happy. See . + Err(err) if err.error() == Errno::EINTR => Ok(()), + Ok(()) => { + // The thread was woken, so retry and spin until we get the lock. + futex_lock_pi(futex_addr, None, ctx, pid, true) + } + res => res, + } +} + +pub fn futex_trylock_pi(futex_addr: usize, ctx: &Context) -> Result { + let tid = ctx.posix_thread.tid(); + + loop { + let prev_val = ctx.user_space().atomic_compare_exchange( + futex_addr, + 0, + PiFutexState::from_tid(tid).into(), + )?; + match prev_val { + 0 | FUTEX_WAITERS => { + // Lock acquired + return Ok(true); + } + prev_val if PiFutexState::new(prev_val).tid() == tid => { + return_errno_with_message!( + Errno::EDEADLK, + "thread attempted to lock a futex it already holds" + ) + } + prev_val if !PiFutexState::new(prev_val).has_waiters() => { + // Acquire failed, set waiters bit if the lock has not changed state. + let prev_val = ctx.user_space().atomic_compare_exchange( + futex_addr, + prev_val, + PiFutexState::new(prev_val).set_has_waiters(true).into(), + )?; + if prev_val != 0 { + // The waiters bit is now set. We will add ourself to the wait list below. + break; + } + // Lock is now unlocked, try again. + } + _ => { + // has_waiters is already set. We will add ourself to the wait list below. + break; + } + } + } + Ok(false) +} + +pub fn futex_unlock_pi( + futex_addr: Vaddr, + max_count: usize, + ctx: &Context, + pid: Option, +) -> Result { + let futex_key = FutexKey::new(futex_addr, FUTEX_BITSET_MATCH_ANY, pid)?; + let (_, futex_bucket_ref) = get_futex_bucket(futex_key); + let mut futex_bucket = futex_bucket_ref.lock(); + + let futex_val = futex_key.load_val(ctx)?.cast_unsigned(); + let tid = ctx.posix_thread.tid(); + if PiFutexState::new(futex_val).tid() != tid { + return_errno_with_message!( + Errno::EPERM, + "attempt to unlock a futex that the thread does not own" + ); + } + let swapped_val = ctx + .user_space() + .atomic_compare_exchange(futex_addr, futex_val, 0)?; + if swapped_val != futex_val { + return_errno_with_message!(Errno::EINVAL, "futex value changed while locked"); + } + if PiFutexState::new(futex_val).has_waiters() { + let res = futex_bucket.remove_and_wake_items(futex_key, max_count); + Ok(res) + } else { + Ok(0) + } +} + /// Does futex wake pub fn futex_wake(futex_addr: Vaddr, max_count: usize, pid: Option) -> Result { futex_wake_bitset(futex_addr, max_count, FUTEX_BITSET_MATCH_ANY, pid) @@ -372,7 +550,13 @@ impl FutexBucketVec { } } +/// A hash bucket holding the waiters for a set of futex addresses. +/// +/// Futex addresses are hashed to a bucket in [`FutexBucketVec`]. All waiters whose address hashes +/// to the same bucket are stored together. The bucket must be locked (via its enclosing +/// `SpinLock`) before any of its items are accessed or modified. struct FutexBucket { + /// All waiter items currently queued in this bucket. items: Vec, } @@ -383,10 +567,14 @@ impl FutexBucket { } } + /// Enqueues a waiter item into this bucket. pub fn add_item(&mut self, item: FutexItem) { self.items.push(item); } + /// Removes and wakes up to `max_count` items matching `key`. + /// + /// Returns the number of waiters actually woken. pub fn remove_and_wake_items(&mut self, key: FutexKey, max_count: usize) -> usize { let mut count = 0; @@ -404,6 +592,10 @@ impl FutexBucket { count } + /// Reassigns the key of up to `max_count` items matching `key` to `new_key`. + /// + /// Used by `FUTEX_REQUEUE` when both futex addresses hash to the same bucket, so items + /// can be requeued in place without moving them to another bucket. pub fn update_item_keys(&mut self, key: FutexKey, new_key: FutexKey, max_count: usize) { let mut count = 0; for item in self.items.iter_mut() { @@ -417,6 +609,11 @@ impl FutexBucket { } } + /// Moves up to `max_nrequeues` items matching `key` from this bucket into `another`, + /// updating each item's key to `new_key`. + /// + /// Used by `FUTEX_REQUEUE` when the source and destination futex addresses hash to + /// different buckets. Both buckets must already be locked by the caller. pub fn requeue_items_to_another_bucket( &mut self, key: FutexKey, @@ -441,12 +638,22 @@ impl FutexBucket { } } +/// A single waiter enqueued in a [`FutexBucket`]. +/// +/// Each item pairs a [`FutexKey`] (identifying the futex word and bitset the waiter is sleeping +/// on) with a [`Waker`] that can unblock the corresponding [`Waiter`]. struct FutexItem { + /// The futex word this waiter is sleeping on. key: FutexKey, + /// The waker used to unblock the thread when the futex is woken or requeued. waker: Arc, } impl FutexItem { + /// Creates a new `FutexItem` for `key` and returns it together with the paired [`Waiter`]. + /// + /// The caller should enqueue the item into the appropriate [`FutexBucket`] and then block on + /// the returned [`Waiter`]. pub fn create(key: FutexKey) -> (Self, Waiter) { let (waiter, waker) = Waiter::new_pair(); let futex_item = FutexItem { key, waker }; @@ -454,23 +661,49 @@ impl FutexItem { (futex_item, waiter) } + /// Wakes the waiter associated with this item. + /// + /// Returns `true` if the waiter was successfully woken, `false` if it had already been woken + /// or cancelled. #[must_use] pub fn wake(&self) -> bool { self.waker.wake_up() } } -// The addr of a futex, it should be used to mark different futex word -#[derive(Debug, Clone, Copy)] +/// The identity of a futex word, used to match waiters against wake/requeue operations. +/// +/// Two keys match (see [`FutexKey::match_up`]) when they refer to the same futex word in the +/// same scope (process-private or shared) and have at least one bit in common in their bitsets. +#[derive(Clone, Copy)] struct FutexKey { + /// User-space virtual address of the four-byte futex word. addr: Vaddr, + /// Bitmask used to select which waiters to wake; only waiters whose bitset shares at least + /// one bit with the waker's bitset are matched. bitset: FutexBitSet, /// Specify whether this `FutexKey` is process private or shared. If `pid` is /// None, then this `FutexKey` is shared. pid: Option, } +impl Debug for FutexKey { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("FutexKey") + .field("addr", &format_args!("{:#x}", self.addr)) + .field("bitset", &format_args!("{:#x}", self.bitset)) + .field("pid", &self.pid) + .finish() + } +} + impl FutexKey { + /// Creates a new `FutexKey` for the given address, bitset, and scope. + /// + /// `pid` being `Some` makes the key process-private; `None` makes it shared across + /// processes. + /// + /// Returns `Err(EINVAL)` if `addr` is not aligned to a four-byte boundary. pub fn new(addr: Vaddr, bitset: FutexBitSet, pid: Option) -> Result { // "On all platforms, futexes are four-byte integers that must be aligned on a four-byte // boundary." @@ -485,6 +718,7 @@ impl FutexKey { Ok(Self { addr, bitset, pid }) } + /// Atomically loads the current value of the futex word from user space. pub fn load_val(&self, ctx: &Context) -> Result { Ok(ctx .user_space() @@ -492,10 +726,15 @@ impl FutexKey { .cast_signed()) } + /// Returns the user-space virtual address of the futex word. pub fn addr(&self) -> Vaddr { self.addr } + /// Returns `true` if this key and `another` refer to the same futex waiter set. + /// + /// Two keys match when their addresses are equal, their `pid` scopes are equal, and their + /// bitsets share at least one bit. pub fn match_up(&self, another: &Self) -> bool { // TODO: Use hash value to do match_up self.addr == another.addr && (self.bitset & another.bitset) != 0 && self.pid == another.pid diff --git a/kernel/src/syscall/futex.rs b/kernel/src/syscall/futex.rs index 86b25d409..99aea535d 100644 --- a/kernel/src/syscall/futex.rs +++ b/kernel/src/syscall/futex.rs @@ -6,8 +6,7 @@ use crate::{ current_userspace, prelude::*, process::posix_thread::futex::{ - FutexFlags, FutexOp, futex_op_and_flags_from_u32, futex_requeue, futex_wait, - futex_wait_bitset, futex_wake, futex_wake_bitset, futex_wake_op, + FutexFlags, FutexOp, futex_lock_pi, futex_op_and_flags_from_u32, futex_requeue, futex_trylock_pi, futex_unlock_pi, futex_wait, futex_wait_bitset, futex_wake, futex_wake_bitset, futex_wake_op }, syscall::SyscallReturn, time::{ @@ -133,6 +132,21 @@ pub fn sys_futex( pid, ) } + FutexOp::FUTEX_LOCK_PI => { + let timeout = get_futex_timeout(utime_addr)?; + futex_lock_pi(futex_addr, timeout, ctx, pid, false) + .map(|_| 0) + } + FutexOp::FUTEX_TRYLOCK_PI => { + if futex_trylock_pi(futex_addr, ctx)? { + Ok(0) + } else { + return_errno_with_message!(Errno::EAGAIN, "futex is locked"); + } + } + FutexOp::FUTEX_UNLOCK_PI => { + futex_unlock_pi(futex_addr, 1, ctx, pid) + } _ => { warn!("futex op = {:?}", futex_op); return_errno_with_message!(Errno::EINVAL, "unsupported futex op"); diff --git a/kernel/src/time/wait.rs b/kernel/src/time/wait.rs index 9866fe3a2..c37351d40 100644 --- a/kernel/src/time/wait.rs +++ b/kernel/src/time/wait.rs @@ -126,6 +126,12 @@ pub struct ManagedTimeout<'a> { manager: &'a Arc, } +impl<'a> Debug for ManagedTimeout<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("ManagedTimeout").field("timeout", &self.timeout).field("manager", &"").finish() + } +} + impl<'a> ManagedTimeout<'a> { /// Creates a new `ManagedTimeout` with the JIFFIES timer manager. pub fn new(timeout: Duration) -> Self { diff --git a/ostd/src/mm/io.rs b/ostd/src/mm/io.rs index ecc0520bd..73ee89cc7 100644 --- a/ostd/src/mm/io.rs +++ b/ostd/src/mm/io.rs @@ -964,6 +964,52 @@ impl VmWriter<'_, Fallible> { Ok((old_val, old_val == cur_val)) } + /// Performs an atomic compare-and-exchange on a `PodAtomic` value. + /// + /// On success, the previous value will be returned. On failure, the actual value at the address + /// is returned. The cursor of the reader and writer will not move in either case. + /// + /// This method only guarantees the atomicity of the specific operation. There are no + /// synchronization constraints on other memory accesses. This aligns with the [Relaxed + /// ordering](https://en.cppreference.com/w/cpp/atomic/memory_order.html#Relaxed_ordering) + /// specified in the C++11 memory model. + /// + /// This method will fail with errors if: + /// 1. the remaining (avail) space of the reader (writer) is less than + /// `core::mem::size_of::()` bytes, or + /// 2. the memory operation fails due to an unresolvable page fault. + /// + /// # Panics + /// + /// This method will panic if: + /// 1. the reader and the writer does not point to the same memory location, or + /// 2. the memory location is not aligned on a `core::mem::align_of::()`-byte boundary. + pub fn atomic_compare_exchange( + &mut self, + reader: &VmReader, + old_val: T, + new_val: T, + ) -> Result + where + T: PodAtomic + Eq, + { + if self.avail() < core::mem::size_of::() || reader.remain() < core::mem::size_of::() { + return InvalidArgsSnafu.fail(); + } + + assert_eq!(self.cursor.cast_const(), reader.cursor); + + let cursor = self.cursor.cast::(); + assert!(cursor.is_aligned()); + + // SAFETY: + // 1. The cursor is either valid for reading or in user space for `size_of::()` bytes. + // 2. The cursor is aligned on a `align_of::()`-byte boundary. + let prev_val = unsafe { T::atomic_cmpxchg_fallible(cursor, old_val, new_val)? }; + + Ok(prev_val) + } + /// Writes `len` zeros to the target memory. /// /// This method attempts to fill up to `len` bytes with zeros. If the available diff --git a/test/syscall_test/gvisor/blocklists/futex_test b/test/syscall_test/gvisor/blocklists/futex_test index e4a621d8f..7e905eb8b 100644 --- a/test/syscall_test/gvisor/blocklists/futex_test +++ b/test/syscall_test/gvisor/blocklists/futex_test @@ -4,14 +4,14 @@ RobustFutexTest.* SharedFutexTest.WakeInterprocessFile_NoRandomSave SharedPrivate/PrivateAndSharedFutexTest.NoWakeInterprocessPrivateAnon_NoRandomSave/* +SharedPrivate/PrivateAndSharedFutexTest.Wa* -SharedPrivate/PrivateAndSharedFutexTest.PIBasic/* +SharedPrivate/PrivateAndSharedFutexTest.PIBasic/0 SharedPrivate/PrivateAndSharedFutexTest.PIConcurrency_NoRandomSave/* -SharedPrivate/PrivateAndSharedFutexTest.PIWaiters/* -SharedPrivate/PrivateAndSharedFutexTest.PITryLock/* +SharedPrivate/PrivateAndSharedFutexTest.PIWaiters/0 +SharedPrivate/PrivateAndSharedFutexTest.PITryLock/0 SharedPrivate/PrivateAndSharedFutexTest.PITryLockConcurrency_NoRandomSave/* # WakeAll_NoRandomSave/* encounters a segmenation fault -SharedPrivate/PrivateAndSharedFutexTest.WakeAll_NoRandomSave/0 -SharedPrivate/PrivateAndSharedFutexTest.WakeAll_NoRandomSave/1 +SharedPrivate/PrivateAndSharedFutexTest.WakeAll_NoRandomSave/* # WakeAfterCOWBreak_NoRandomSave/* hangs sometimes SharedPrivate/PrivateAndSharedFutexTest.WakeAfterCOWBreak_NoRandomSave/0