From: Vasily Gorbik <gor@xxxxxxxxxxxxx>

Before being able to host protected virtual machines, donate some of
the memory to the ultravisor. Besides that, the ultravisor might impose
addressing limitations on the memory used to back protected VM storage.
Treat that limit as the protected virtualization host's virtual memory
limit.

Signed-off-by: Vasily Gorbik <gor@xxxxxxxxxxxxx>
Reviewed-by: Christian Borntraeger <borntraeger@xxxxxxxxxx>
---
 arch/s390/include/asm/uv.h | 16 ++++++++++++
 arch/s390/kernel/setup.c   |  3 +++
 arch/s390/kernel/uv.c      | 53 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 72 insertions(+)

diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index 32eac3ab2d3b..cdf2fd71d7ab 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -23,12 +23,14 @@
 #define UVC_RC_NO_RESUME	0x0007
 
 #define UVC_CMD_QUI		0x0001
+#define UVC_CMD_INIT_UV		0x000f
 #define UVC_CMD_SET_SHARED_ACCESS	0x1000
 #define UVC_CMD_REMOVE_SHARED_ACCESS	0x1001
 
 /* Bits in installed uv calls */
 enum uv_cmds_inst {
 	BIT_UVC_CMD_QUI = 0,
+	BIT_UVC_CMD_INIT_UV = 1,
 	BIT_UVC_CMD_SET_SHARED_ACCESS = 8,
 	BIT_UVC_CMD_REMOVE_SHARED_ACCESS = 9,
 };
@@ -59,6 +61,15 @@ struct uv_cb_qui {
 	u64 reserved98;
 } __packed __aligned(8);
 
+struct uv_cb_init {
+	struct uv_cb_header header;
+	u64 reserved08[2];
+	u64 stor_origin;
+	u64 stor_len;
+	u64 reserved28[4];
+
+} __packed __aligned(8);
+
 struct uv_cb_share {
 	struct uv_cb_header header;
 	u64 reserved08[3];
@@ -158,8 +169,13 @@ static inline int is_prot_virt_host(void)
 {
 	return prot_virt_host;
 }
+
+void setup_uv(void);
+void adjust_to_uv_max(unsigned long *vmax);
 #else
 #define is_prot_virt_host() 0
+static inline void setup_uv(void) {}
+static inline void adjust_to_uv_max(unsigned long *vmax) {}
 #endif
 
 #if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || \
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f2ab2528859f..5f178d557cc8 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -560,6 +560,8 @@ static void __init setup_memory_end(void)
 		vmax = _REGION1_SIZE; /* 4-level kernel page table */
 	}
 
+	adjust_to_uv_max(&vmax);
+
 	/* module area is at the end of the kernel address space. */
 	MODULES_END = vmax;
 	MODULES_VADDR = MODULES_END - MODULES_LEN;
@@ -1140,6 +1142,7 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT));
 
+	setup_uv();
 	setup_memory_end();
 	setup_memory();
 	dma_contiguous_reserve(memory_end);
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 35ce89695509..f7778493e829 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -45,4 +45,57 @@ static int __init prot_virt_setup(char *val)
 	return rc;
 }
 early_param("prot_virt", prot_virt_setup);
+
+static int __init uv_init(unsigned long stor_base, unsigned long stor_len)
+{
+	struct uv_cb_init uvcb = {
+		.header.cmd = UVC_CMD_INIT_UV,
+		.header.len = sizeof(uvcb),
+		.stor_origin = stor_base,
+		.stor_len = stor_len,
+	};
+	int cc;
+
+	cc = uv_call(0, (uint64_t)&uvcb);
+	if (cc || uvcb.header.rc != UVC_RC_EXECUTED) {
+		pr_err("Ultravisor init failed with cc: %d rc: 0x%hx\n", cc,
+		       uvcb.header.rc);
+		return -1;
+	}
+	return 0;
+}
+
+void __init setup_uv(void)
+{
+	unsigned long uv_stor_base;
+
+	if (!prot_virt_host)
+		return;
+
+	uv_stor_base = (unsigned long)memblock_alloc_try_nid(
+		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
+		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+	if (!uv_stor_base) {
+		pr_info("Failed to reserve %lu bytes for ultravisor base storage\n",
+			uv_info.uv_base_stor_len);
+		goto fail;
+	}
+
+	if (uv_init(uv_stor_base, uv_info.uv_base_stor_len)) {
+		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
+		goto fail;
+	}
+
+	pr_info("Reserving %luMB as ultravisor base storage\n",
+		uv_info.uv_base_stor_len >> 20);
+	return;
+fail:
+	prot_virt_host = 0;
+}
+
+void adjust_to_uv_max(unsigned long *vmax)
+{
+	if (prot_virt_host && *vmax > uv_info.max_sec_stor_addr)
+		*vmax = uv_info.max_sec_stor_addr;
+}
 #endif
-- 
2.24.0
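
For reference, below is a minimal standalone sketch (not part of the patch)
that mirrors the uv_cb_init layout added above and checks that the reserved
field names encode the expected byte offsets of the UVC_CMD_INIT_UV control
block. The uv_cb_header layout (four 16-bit fields: len, cmd, rc, rrc) is an
assumption based on the existing definitions in arch/s390/include/asm/uv.h,
and stdint types stand in for the kernel's u16/u64.

/* Illustrative userspace sketch; build with any C compiler and run. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Assumed to mirror the kernel's uv_cb_header: len, cmd, rc, rrc. */
struct uv_cb_header {
	uint16_t len;
	uint16_t cmd;
	uint16_t rc;
	uint16_t rrc;
} __attribute__((packed, aligned(8)));

/* Mirrors struct uv_cb_init from the patch; reserved names encode offsets. */
struct uv_cb_init {
	struct uv_cb_header header;
	uint64_t reserved08[2];
	uint64_t stor_origin;	/* origin of the storage donated to the UV */
	uint64_t stor_len;	/* length of the donated storage */
	uint64_t reserved28[4];
} __attribute__((packed, aligned(8)));

int main(void)
{
	/* reserved08/reserved28 are named after their byte offsets */
	assert(offsetof(struct uv_cb_init, reserved08) == 0x08);
	assert(offsetof(struct uv_cb_init, stor_origin) == 0x18);
	assert(offsetof(struct uv_cb_init, stor_len) == 0x20);
	assert(offsetof(struct uv_cb_init, reserved28) == 0x28);
	assert(sizeof(struct uv_cb_init) == 0x48);
	return 0;
}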