On 18. 01. 24, 1:07, Yang Shi wrote:
This works around the problem, of course (but is a band-aid, not a fix):
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1829,7 +1829,7 @@ get_unmapped_area(struct file *file, unsigned long
addr, unsigned long len,
*/
pgoff = 0;
get_area = shmem_get_unmapped_area;
- } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+ } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
!in_32bit_syscall()) {
/* Ensures that larger anonymous mappings are THP
aligned. */
get_area = thp_get_unmapped_area;
}
thp_get_unmapped_area() does not take care of the legacy stuff...
Could you please help test the patch below? It compiles, but I don't
have a 32-bit userspace or machine to test it with.
Yeah, for x86_64, it's semantically the same as the above, so this works
too:
Tested-by: Jiri Slaby <jirislaby@xxxxxxxxxx>
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -811,6 +811,9 @@ static unsigned long
__thp_get_unmapped_area(struct file *filp,
loff_t off_align = round_up(off, size);
unsigned long len_pad, ret;
+ if (IS_ENABLED(CONFIG_32BIT) || in_compat_syscall())
+ return 0;
+
if (off_end <= off_align || (off_end - off_align) < size)
return 0;
thanks,
--
js
suse labs