From: Hugh Dickins <hughd@xxxxxxxxxx>

kaiser_add_user_map() took no notice when kaiser_pagetable_walk()
failed.  And avoid its might_sleep() when atomic (though atomic at
present unused).

Signed-off-by: Hugh Dickins <hughd@xxxxxxxxxx>
Acked-by: Jiri Kosina <jkosina@xxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
(cherry picked from commit 407c3ff6a24c7cb418b77a124d17e282f9622037)
Signed-off-by: Pavel Tatashin <pasha.tatashin@xxxxxxxxxx>

Conflicts:
	arch/x86/mm/kaiser.c
---
 arch/x86/mm/kaiser.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index df7f6591d5aa..058d0886086b 100644
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -99,11 +99,11 @@ static pte_t *kaiser_pagetable_walk(unsigned long address, bool is_atomic)
 	pgd_t *pgd = native_get_shadow_pgd(pgd_offset_k(address));
 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
 
-	might_sleep();
 	if (is_atomic) {
 		gfp &= ~GFP_KERNEL;
 		gfp |= __GFP_HIGH;
-	}
+	} else
+		might_sleep();
 
 	if (pgd_none(*pgd)) {
 		WARN_ONCE(1, "All shadow pgds should have been populated");
@@ -160,13 +160,17 @@ int kaiser_add_user_map(const void *__start_addr, unsigned long size,
 	unsigned long end_addr = PAGE_ALIGN(start_addr + size);
 	unsigned long target_address;
 
-	for (;address < end_addr; address += PAGE_SIZE) {
+	for (; address < end_addr; address += PAGE_SIZE) {
 		target_address = get_pa_from_mapping(address);
 		if (target_address == -1) {
 			ret = -EIO;
 			break;
 		}
 		pte = kaiser_pagetable_walk(address, false);
+		if (!pte) {
+			ret = -ENOMEM;
+			break;
+		}
 		if (pte_none(*pte)) {
 			set_pte(pte, __pte(flags | target_address));
 		} else {
-- 
2.16.2
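
For reference, a minimal sketch of a caller that propagates the new
failure mode.  The caller function and the mapped object below are
hypothetical illustrations, not taken from the kernel tree;
kaiser_add_user_map() and __PAGE_KERNEL are the real names used above.

	/*
	 * Hypothetical caller of the function patched above.  With this
	 * patch applied, kaiser_add_user_map() can return -ENOMEM when
	 * kaiser_pagetable_walk() fails to allocate a shadow page table
	 * page, in addition to the pre-existing -EIO when no physical
	 * address is found for a virtual address.
	 */
	static char example_area[PAGE_SIZE] __page_aligned_bss;	/* hypothetical */

	static int __init map_example_area(void)
	{
		int ret;

		ret = kaiser_add_user_map(example_area, sizeof(example_area),
					  __PAGE_KERNEL);
		if (ret)
			return ret;	/* -EIO or, with this patch, -ENOMEM */
		return 0;
	}

The is_atomic branch in kaiser_pagetable_walk() follows the usual
pattern for atomic allocations: clearing GFP_KERNEL drops the reclaim
flags that may sleep, while __GFP_HIGH lets the allocation dip into
memory reserves, so might_sleep() is now asserted only on the path that
is actually allowed to sleep.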