On Fri, Aug 02, 2024 at 07:38:32AM +0000, Alice Ryhl wrote: [...] > +// These methods are safe to call even if `mm_users` is zero. > +impl Mm { > + /// Call `mmgrab` on `current.mm`. > + #[inline] > + pub fn mmgrab_current() -> Option<ARef<Mm>> { > + // SAFETY: It's safe to get the `mm` field from current. > + let mm = unsafe { > + let current = bindings::get_current(); > + (*current).mm > + }; > + > + let mm = NonNull::new(mm)?; > + > + // SAFETY: We just checked that `mm` is not null. > + unsafe { bindings::mmgrab(mm.as_ptr()) }; > + > + // SAFETY: We just created an `mmgrab` refcount. Layouts are compatible due to > + // repr(transparent). > + Some(unsafe { ARef::from_raw(mm.cast()) }) We can use from_raw() + into() here. If a type `impl`s AlwaysRefcounted, there should be no way to call the "refcount increment" function directly. > + } > + > + /// Returns a raw pointer to the inner `mm_struct`. > + #[inline] > + pub fn as_raw(&self) -> *mut bindings::mm_struct { > + self.mm.get() > + } > + > + /// Obtain a reference from a raw pointer. > + /// > + /// # Safety > + /// > + /// The caller must ensure that `ptr` points at an `mm_struct`, and that it is not deallocated > + /// during the lifetime 'a. > + #[inline] > + pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a Mm { > + // SAFETY: Caller promises that the pointer is valid for 'a. Layouts are compatible due to > + // repr(transparent). > + unsafe { &*ptr.cast() } > + } > + > + /// Check whether this vma is associated with this mm. > + #[inline] > + pub fn is_same_mm(&self, area: &virt::VmArea) -> bool { > + // SAFETY: The `vm_mm` field of the area is immutable, so we can read it without > + // synchronization. > + let vm_mm = unsafe { (*area.as_ptr()).vm_mm }; > + > + ptr::eq(vm_mm, self.as_raw()) > + } > + > + /// Calls `mmget_not_zero` and returns a handle if it succeeds. 
> + #[inline] > + pub fn mmget_not_zero(&self) -> Option<ARef<MmWithUser>> { > + // SAFETY: The pointer is valid since self is a reference. > + let success = unsafe { bindings::mmget_not_zero(self.as_raw()) }; > + > + if success { > + // SAFETY: We just created an `mmget` refcount. > + Some(unsafe { ARef::from_raw(NonNull::new_unchecked(self.as_raw().cast())) }) > + } else { > + None > + } > + } > +} > + > +// These methods require `mm_users` to be non-zero. > +impl MmWithUser { > + /// Obtain a reference from a raw pointer. > + /// > + /// # Safety > + /// > + /// The caller must ensure that `ptr` points at an `mm_struct`, and that `mm_users` remains > + /// non-zero for the duration of the lifetime 'a. > + #[inline] > + pub unsafe fn from_raw<'a>(ptr: *const bindings::mm_struct) -> &'a MmWithUser { > + // SAFETY: Caller promises that the pointer is valid for 'a. The layout is compatible due > + // to repr(transparent). > + unsafe { &*ptr.cast() } > + } > + > + /// Use `mmput_async` when dropping this refcount. > + pub fn use_mmput_async(me: ARef<MmWithUser>) -> ARef<MmWithUserAsync> { > + // SAFETY: The layouts and invariants are compatible. > + unsafe { ARef::from_raw(ARef::into_raw(me).cast()) } > + } > + > + /// Lock the mmap write lock. > + #[inline] > + pub fn mmap_write_lock(&self) -> MmapWriteLock<'_> { > + // SAFETY: The pointer is valid since self is a reference. > + unsafe { bindings::mmap_write_lock(self.as_raw()) }; > + > + // INVARIANT: We just acquired the write lock. > + MmapWriteLock { mm: self } > + } > + > + /// Lock the mmap read lock. > + #[inline] > + pub fn mmap_read_lock(&self) -> MmapReadLock<'_> { > + // SAFETY: The pointer is valid since self is a reference. > + unsafe { bindings::mmap_read_lock(self.as_raw()) }; > + > + // INVARIANT: We just acquired the read lock. > + MmapReadLock { mm: self } > + } > + > + /// Try to lock the mmap read lock. 
> + #[inline] > + pub fn mmap_read_trylock(&self) -> Option<MmapReadLock<'_>> { > + // SAFETY: The pointer is valid since self is a reference. > + let success = unsafe { bindings::mmap_read_trylock(self.as_raw()) }; > + > + if success { > + // INVARIANT: We just acquired the read lock. > + Some(MmapReadLock { mm: self }) > + } else { > + None > + } > + } > +} > + > +impl MmWithUserAsync { > + /// Use `mmput` when dropping this refcount. > + pub fn use_mmput(me: ARef<MmWithUserAsync>) -> ARef<MmWithUser> { > + // SAFETY: The layouts and invariants are compatible. > + unsafe { ARef::from_raw(ARef::into_raw(me).cast()) } > + } > +} > + > +/// A guard for the mmap read lock. > +/// > +/// # Invariants > +/// > +/// This `MmapReadLock` guard owns the mmap read lock. > +pub struct MmapReadLock<'a> { > + mm: &'a MmWithUser, > +} > + > +impl<'a> MmapReadLock<'a> { > + /// Look up a vma at the given address. > + #[inline] > + pub fn vma_lookup(&self, vma_addr: usize) -> Option<&virt::VmArea> { > + // SAFETY: We hold a reference to the mm, so the pointer must be valid. Any value is okay > + // for `vma_addr`. > + let vma = unsafe { bindings::vma_lookup(self.mm.as_raw(), vma_addr as _) }; > + > + if vma.is_null() { > + None > + } else { > + // SAFETY: We just checked that a vma was found, so the pointer is valid. Furthermore, > + // the returned area will borrow from this read lock guard, so it can only be used > + // while the read lock is still held. The returned reference is immutable, so the > + // reference cannot be used to modify the area. > + unsafe { Some(virt::VmArea::from_raw_vma(vma)) } > + } > + } > +} > + > +impl Drop for MmapReadLock<'_> { > + #[inline] > + fn drop(&mut self) { > + // SAFETY: We hold the read lock by the type invariants. > + unsafe { bindings::mmap_read_unlock(self.mm.as_raw()) }; > + } > +} > + > +/// A guard for the mmap write lock. > +/// > +/// # Invariants > +/// > +/// This `MmapWriteLock` guard owns the mmap write lock. 
> +pub struct MmapWriteLock<'a> { > + mm: &'a MmWithUser, > +} > + > +impl<'a> MmapWriteLock<'a> { > + /// Look up a vma at the given address. > + #[inline] > + pub fn vma_lookup(&mut self, vma_addr: usize) -> Option<&mut virt::VmArea> { I think this needs to be -> Option<Pin<&mut virt::VmArea>>; otherwise, you could swap two VMAs (from different MMs) even though address stability is required both by the VMAs themselves (list_head) and by other structures that reference them (list_head and rb_node): let vma1 = writer1.vma_lookup(x)?; let vma2 = writer2.vma_lookup(x)?; swap(vma1, vma2); > + // SAFETY: We hold a reference to the mm, so the pointer must be valid. Any value is okay > + // for `vma_addr`. > + let vma = unsafe { bindings::vma_lookup(self.mm.as_raw(), vma_addr as _) }; > + > + if vma.is_null() { > + None > + } else { > + // SAFETY: We just checked that a vma was found, so the pointer is valid. Furthermore, > + // the returned area will borrow from this write lock guard, so it can only be used > + // while the write lock is still held. We hold the write lock, so mutable operations on > + // the area are okay. > + unsafe { Some(virt::VmArea::from_raw_vma_mut(vma)) } > + } > + } > +} > + > +impl Drop for MmapWriteLock<'_> { > + #[inline] > + fn drop(&mut self) { > + // SAFETY: We hold the write lock by the type invariants. > + unsafe { bindings::mmap_write_unlock(self.mm.as_raw()) }; > + } > +} (Will review the locking part later) Regards, Boqun [...]