From: Vasily Gorbik <gor@xxxxxxxxxxxxx>

Add exception handlers performing transparent transition of non-secure
pages to secure (import) upon guest access, and of secure pages to
non-secure (export) upon hypervisor access. The current assumption is
that guest pages are pinned.

Signed-off-by: Vasily Gorbik <gor@xxxxxxxxxxxxx>
---
 arch/s390/kernel/pgm_check.S |  4 +--
 arch/s390/mm/fault.c         | 64 ++++++++++++++++++++++++++++++++++++
 2 files changed, 66 insertions(+), 2 deletions(-)

diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 59dee9d3bebf..27ac4f324c70 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -78,8 +78,8 @@ PGM_CHECK(do_dat_exception)	/* 39 */
 PGM_CHECK(do_dat_exception)	/* 3a */
 PGM_CHECK(do_dat_exception)	/* 3b */
 PGM_CHECK_DEFAULT		/* 3c */
-PGM_CHECK_DEFAULT		/* 3d */
-PGM_CHECK_DEFAULT		/* 3e */
+PGM_CHECK(do_secure_storage_access)	/* 3d */
+PGM_CHECK(do_non_secure_storage_access)	/* 3e */
 PGM_CHECK_DEFAULT		/* 3f */
 PGM_CHECK_DEFAULT		/* 40 */
 PGM_CHECK_DEFAULT		/* 41 */
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 7b0bb475c166..0c4577472432 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -38,6 +38,7 @@
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
 #include <asm/facility.h>
+#include <asm/uv.h>
 #include "../kernel/entry.h"
 
 #define __FAIL_ADDR_MASK -4096L
@@ -816,3 +817,66 @@ static int __init pfault_irq_init(void)
 
 early_initcall(pfault_irq_init);
 #endif /* CONFIG_PFAULT */
+
+#ifdef CONFIG_KVM_S390_PROTECTED_VIRTUALIZATION_HOST
+
+void do_secure_storage_access(struct pt_regs *regs)
+{
+	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
+	struct vm_area_struct *vma;
+	struct mm_struct *mm;
+	struct page *page;
+
+	switch (get_fault_type(regs)) {
+	case USER_FAULT:
+		mm = current->mm;
+		down_read(&mm->mmap_sem);
+		vma = find_vma(mm, addr);
+		if (!vma) {
+			up_read(&mm->mmap_sem);
+			do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
+			break;
+		}
+		page = follow_page(vma, addr, FOLL_GET);
+		if (IS_ERR_OR_NULL(page)) {
+			up_read(&mm->mmap_sem);
+			break;
+		}
+		uv_convert_from_secure(page_to_phys(page));
+		put_page(page);
+		up_read(&mm->mmap_sem);
+		break;
+	case KERNEL_FAULT:
+		uv_convert_from_secure(__pa(addr));
+		break;
+	case VDSO_FAULT:
+		/* fallthrough */
+	case GMAP_FAULT:
+		/* fallthrough */
+	default:
+		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
+		WARN_ON_ONCE(1);
+	}
+}
+NOKPROBE_SYMBOL(do_secure_storage_access);
+
+void do_non_secure_storage_access(struct pt_regs *regs)
+{
+	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
+	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
+
+	uv_convert_to_secure(gmap, gaddr);
+}
+NOKPROBE_SYMBOL(do_non_secure_storage_access);
+
+#else
+void do_secure_storage_access(struct pt_regs *regs)
+{
+	default_trap_handler(regs);
+}
+
+void do_non_secure_storage_access(struct pt_regs *regs)
+{
+	default_trap_handler(regs);
+}
+#endif
-- 
2.20.1
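
As an aside for reviewers: the table in pgm_check.S simply maps the
program interruption code to a handler. Below is a minimal user-space
sketch of that dispatch; it reuses the handler names from this patch
but is otherwise entirely hypothetical plain C, not kernel code.

#include <stdio.h>

typedef void (*pgm_handler_t)(unsigned int code);

static void default_trap_handler(unsigned int code)
{
	printf("unhandled program check 0x%x\n", code);
}

static void do_secure_storage_access(unsigned int code)
{
	/* hypervisor touched a secure page: export it */
	printf("0x%x: export secure -> non-secure\n", code);
}

static void do_non_secure_storage_access(unsigned int code)
{
	/* guest touched a non-secure page: import it */
	printf("0x%x: import non-secure -> secure\n", code);
}

int main(void)
{
	pgm_handler_t pgm_check_table[0x80];
	unsigned int i;

	/* everything starts out routed to the default handler ... */
	for (i = 0; i < 0x80; i++)
		pgm_check_table[i] = default_trap_handler;
	/* ... and this patch wires up codes 0x3d and 0x3e */
	pgm_check_table[0x3d] = do_secure_storage_access;
	pgm_check_table[0x3e] = do_non_secure_storage_access;

	pgm_check_table[0x3d](0x3d);
	pgm_check_table[0x3e](0x3e);
	return 0;
}

In the real table the entries are assembly stubs generated by the
PGM_CHECK macro, and the interruption code comes from the lowcore
rather than being passed as an argument.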