Add some additional vma_internal.h stubs in preparation for __mmap_region()
being moved to mm/vma.c. Without these, the move would result in the tests
no longer compiling.

Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@xxxxxxxxxx>
---
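As a quick illustration (not part of the change itself), the sort of
userland test code these stubs are intended to keep building might look
like the sketch below. check_new_stubs() and its arguments are invented
for this example, and the call to vma_set_page_prot() additionally
assumes the usual tools/testing/vma build, which links vma.c to provide
vma_wants_writenotify():

	#include "vma_internal.h"

	/* Exercise a few of the newly-stubbed interfaces. */
	static void check_new_stubs(struct vm_area_struct *vma,
				    struct file *file)
	{
		/* get_file() now returns its argument, as callers expect. */
		struct file *f = get_file(file);

		vm_flags_init(vma, VM_READ | VM_WRITE | VM_SHARED);
		vm_flags_set(vma, VM_LOCKONFAULT);
		vm_flags_clear(vma, VM_LOCKED_MASK);

		/* Both stubs unconditionally succeed (return 0). */
		if (mmap_file(f, vma) || shmem_zero_setup(vma))
			return;

		/* Recompute vm_page_prot from the updated vm_flags. */
		vma_set_page_prot(vma);
	}
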
 tools/testing/vma/vma_internal.h | 115 ++++++++++++++++++++++++++++++-
 1 file changed, 114 insertions(+), 1 deletion(-)

diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
index c5b9da034511..e76ff579e1fd 100644
--- a/tools/testing/vma/vma_internal.h
+++ b/tools/testing/vma/vma_internal.h
@@ -44,7 +44,9 @@
 #define VM_LOCKED	0x00002000
 #define VM_IO		0x00004000
 #define VM_DONTEXPAND	0x00040000
+#define VM_LOCKONFAULT	0x00080000
 #define VM_ACCOUNT	0x00100000
+#define VM_NORESERVE	0x00200000
 #define VM_MIXEDMAP	0x10000000
 #define VM_STACK	VM_GROWSDOWN
 #define VM_SHADOW_STACK	VM_NONE
@@ -53,6 +55,14 @@
 #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
 
+/* This mask represents all the VMA flag bits used by mlock */
+#define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)
+
+#ifdef CONFIG_64BIT
+/* VM is sealed, in vm_flags */
+#define VM_SEALED	_BITUL(63)
+#endif
+
 #define FIRST_USER_ADDRESS	0UL
 #define USER_PGTABLES_CEILING	0UL
 
@@ -698,8 +708,9 @@ static inline void tlb_finish_mmu(struct mmu_gather *)
 {
 }
 
-static inline void get_file(struct file *)
+static inline struct file *get_file(struct file *f)
 {
+	return f;
 }
 
 static inline int vma_dup_policy(struct vm_area_struct *, struct vm_area_struct *)
@@ -920,4 +931,106 @@ static inline bool signal_pending(void *)
 	return false;
 }
 
+static inline bool is_file_hugepages(struct file *)
+{
+	return false;
+}
+
+static inline int security_vm_enough_memory_mm(struct mm_struct *, long)
+{
+	return 0;
+}
+
+static inline bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long)
+{
+	return true;
+}
+
+static inline void vm_flags_init(struct vm_area_struct *vma,
+				 vm_flags_t flags)
+{
+	vma->__vm_flags = flags;
+}
+
+static inline void vm_flags_set(struct vm_area_struct *vma,
+				vm_flags_t flags)
+{
+	vma_start_write(vma);
+	vma->__vm_flags |= flags;
+}
+
+static inline void vm_flags_clear(struct vm_area_struct *vma,
+				  vm_flags_t flags)
+{
+	vma_start_write(vma);
+	vma->__vm_flags &= ~flags;
+}
+
+static inline int call_mmap(struct file *, struct vm_area_struct *)
+{
+	return 0;
+}
+
+static inline int shmem_zero_setup(struct vm_area_struct *)
+{
+	return 0;
+}
+
+static inline void vma_set_anonymous(struct vm_area_struct *vma)
+{
+	vma->vm_ops = NULL;
+}
+
+static inline void ksm_add_vma(struct vm_area_struct *)
+{
+}
+
+static inline void perf_event_mmap(struct vm_area_struct *)
+{
+}
+
+static inline bool vma_is_dax(struct vm_area_struct *)
+{
+	return false;
+}
+
+static inline struct vm_area_struct *get_gate_vma(struct mm_struct *)
+{
+	return NULL;
+}
+
+bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
+
+/* Update vma->vm_page_prot to reflect vma->vm_flags. */
+static inline void vma_set_page_prot(struct vm_area_struct *vma)
+{
+	unsigned long vm_flags = vma->vm_flags;
+	pgprot_t vm_page_prot;
+
+	/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
+	vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));
+
+	if (vma_wants_writenotify(vma, vm_page_prot)) {
+		vm_flags &= ~VM_SHARED;
+		/* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */
+		vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
+	}
+	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
+	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
+}
+
+static inline bool arch_validate_flags(unsigned long)
+{
+	return true;
+}
+
+static inline void vma_close(struct vm_area_struct *)
+{
+}
+
+static inline int mmap_file(struct file *, struct vm_area_struct *)
+{
+	return 0;
+}
+
 #endif	/* __MM_VMA_INTERNAL_H */
-- 
2.47.0