Since page migration / swapping is not supported yet, MFD_INACCESSIBLE
memory behaves like longterm pinned pages and thus should be accounted
to mm->pinned_vm and be restricted by RLIMIT_MEMLOCK.

Signed-off-by: Chao Peng <chao.p.peng@xxxxxxxxxxxxxxx>
---
 mm/shmem.c | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 7b43e274c9a2..ae46fb96494b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -915,14 +915,17 @@ static void notify_fallocate(struct inode *inode, pgoff_t start, pgoff_t end)
 static void notify_invalidate_page(struct inode *inode, struct folio *folio,
 				   pgoff_t start, pgoff_t end)
 {
-#ifdef CONFIG_MEMFILE_NOTIFIER
 	struct shmem_inode_info *info = SHMEM_I(inode);
 
+#ifdef CONFIG_MEMFILE_NOTIFIER
 	start = max(start, folio->index);
 	end = min(end, folio->index + folio_nr_pages(folio));
 
 	memfile_notifier_invalidate(&info->memfile_notifiers, start, end);
 #endif
+
+	if (info->xflags & SHM_F_INACCESSIBLE)
+		atomic64_sub(end - start, &current->mm->pinned_vm);
 }
 
 /*
@@ -2680,6 +2683,20 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
 	return offset;
 }
 
+static bool memlock_limited(unsigned long npages)
+{
+	unsigned long lock_limit;
+	unsigned long pinned;
+
+	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	pinned = atomic64_add_return(npages, &current->mm->pinned_vm);
+	if (pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
+		atomic64_sub(npages, &current->mm->pinned_vm);
+		return true;
+	}
+	return false;
+}
+
 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 							 loff_t len)
 {
@@ -2753,6 +2770,12 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 		goto out;
 	}
 
+	if ((info->xflags & SHM_F_INACCESSIBLE) &&
+	    memlock_limited(end - start)) {
+		error = -ENOMEM;
+		goto out;
+	}
+
 	shmem_falloc.waitq = NULL;
 	shmem_falloc.start = start;
 	shmem_falloc.next = start;
-- 
2.17.1
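
A minimal userspace sketch (not part of the patch) of how the new limit
check could be exercised. It assumes the MFD_INACCESSIBLE memfd_create()
flag introduced earlier in this series; the fallback flag value below is
only a placeholder in case the installed headers do not define it yet.
Run without CAP_IPC_LOCK so the RLIMIT_MEMLOCK check applies:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <unistd.h>

#ifndef MFD_INACCESSIBLE
#define MFD_INACCESSIBLE 0x0008U	/* value assumed from this series */
#endif

int main(void)
{
	/* Cap RLIMIT_MEMLOCK at 1 MiB. */
	struct rlimit rlim = { .rlim_cur = 1 << 20, .rlim_max = 1 << 20 };
	int fd;

	if (setrlimit(RLIMIT_MEMLOCK, &rlim)) {
		perror("setrlimit");
		return 1;
	}

	fd = memfd_create("inaccessible", MFD_INACCESSIBLE);
	if (fd < 0) {
		perror("memfd_create");
		return 1;
	}

	/*
	 * 2 MiB > RLIMIT_MEMLOCK: with this patch the range is charged
	 * to mm->pinned_vm and fallocate() should fail with ENOMEM.
	 */
	if (fallocate(fd, 0, 0, 2 << 20) < 0)
		printf("fallocate: %s (ENOMEM expected)\n", strerror(errno));
	else
		printf("fallocate unexpectedly succeeded\n");

	close(fd);
	return 0;
}

Punching the same range back out with FALLOC_FL_PUNCH_HOLE should then
drop the charge again via notify_invalidate_page().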