Implement `Allocator` for `Vmalloc`, the kernel's virtually contiguous
allocator, typically used for objects that are (much) larger than page
size.

All memory allocations made with `Vmalloc` end up in `vrealloc()`.

Signed-off-by: Danilo Krummrich <dakr@xxxxxxxxxx>
---
 rust/helpers.c                      |  8 ++++++++
 rust/kernel/alloc/allocator.rs      | 24 ++++++++++++++++++++++++
 rust/kernel/alloc/allocator_test.rs |  1 +
 3 files changed, 33 insertions(+)

diff --git a/rust/helpers.c b/rust/helpers.c
index 92d3c03ae1bd..4c628986f0c9 100644
--- a/rust/helpers.c
+++ b/rust/helpers.c
@@ -33,6 +33,7 @@
 #include <linux/sched/signal.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/vmalloc.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 
@@ -200,6 +201,13 @@ rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
 }
 EXPORT_SYMBOL_GPL(rust_helper_krealloc);
 
+void * __must_check __realloc_size(2)
+rust_helper_vrealloc(const void *p, size_t size, gfp_t flags)
+{
+	return vrealloc(p, size, flags);
+}
+EXPORT_SYMBOL_GPL(rust_helper_vrealloc);
+
 /*
  * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
  * use it in contexts where Rust expects a `usize` like slice (array) indices.
diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
index 397ae5bcc043..e9a3d0694f41 100644
--- a/rust/kernel/alloc/allocator.rs
+++ b/rust/kernel/alloc/allocator.rs
@@ -16,6 +16,12 @@
 /// `bindings::krealloc`.
 pub struct Kmalloc;
 
+/// The virtually contiguous kernel allocator.
+///
+/// The vmalloc allocator allocates pages from the page level allocator and maps them into the
+/// contiguous kernel virtual space.
+pub struct Vmalloc;
+
 /// Returns a proper size to alloc a new object aligned to `new_layout`'s alignment.
 fn aligned_size(new_layout: Layout) -> usize {
     // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
@@ -58,6 +64,10 @@ fn krealloc() -> Self {
         Self(bindings::krealloc)
     }
 
+    fn vrealloc() -> Self {
+        Self(bindings::vrealloc)
+    }
+
     // SAFETY: `call` has the exact same safety requirements as `Allocator::realloc`.
     unsafe fn call(
         &self,
@@ -136,6 +146,20 @@ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
     }
 }
 
+unsafe impl Allocator for Vmalloc {
+    unsafe fn realloc(
+        ptr: Option<NonNull<u8>>,
+        layout: Layout,
+        flags: Flags,
+    ) -> Result<NonNull<[u8]>, AllocError> {
+        let realloc = ReallocFunc::vrealloc();
+
+        // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was
+        // previously allocated with this `Allocator`.
+        unsafe { realloc.call(ptr, layout, flags) }
+    }
+}
+
 #[global_allocator]
 static ALLOCATOR: Kmalloc = Kmalloc;
 
diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs
index 4785efc474a7..e7bf2982f68f 100644
--- a/rust/kernel/alloc/allocator_test.rs
+++ b/rust/kernel/alloc/allocator_test.rs
@@ -7,6 +7,7 @@
 use core::ptr::NonNull;
 
 pub struct Kmalloc;
+pub type Vmalloc = Kmalloc;
 
 unsafe impl Allocator for Kmalloc {
     unsafe fn realloc(
-- 
2.45.2
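
For reference, below is a minimal, hypothetical sketch (not part of the patch) of how a caller
might exercise the new `Vmalloc` allocator through the `Allocator::realloc` method added above.
It assumes the module paths `kernel::alloc::allocator::Vmalloc`, `kernel::alloc::flags::GFP_KERNEL`
and `kernel::alloc::AllocError` from this series, and it assumes the `Allocator` contract from the
earlier patches, where a zero-sized request releases the allocation; the function name
`vmalloc_demo` is made up for illustration.

```rust
use core::alloc::Layout;

use kernel::alloc::allocator::Vmalloc;
use kernel::alloc::{flags, AllocError, Allocator};

fn vmalloc_demo() -> Result<(), AllocError> {
    // Ask for a buffer well above page size; `Vmalloc` maps individual pages into a
    // virtually contiguous range, so no physically contiguous memory is required.
    let layout = Layout::from_size_align(16 << 10, 8).map_err(|_| AllocError)?;

    // Passing `None` requests a fresh allocation; internally this ends up in `vrealloc()`.
    let buf = unsafe { Vmalloc::realloc(None, layout, flags::GFP_KERNEL)? };

    // ... use `buf` ...

    // Assumed `Allocator` contract from earlier in the series: a zero-sized request
    // frees the previously returned allocation.
    let zero = Layout::from_size_align(0, 8).map_err(|_| AllocError)?;
    let _ = unsafe { Vmalloc::realloc(Some(buf.cast()), zero, flags::GFP_KERNEL) };

    Ok(())
}
```

In-tree users would normally reach this allocator through higher-level containers rather than
calling `realloc` directly; the sketch only illustrates how the trait implementation routes both
allocation and freeing into a single `vrealloc()` entry point.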