The new helper mktme_disable() allows disabling MKTME even if it has been
enumerated successfully. MKTME initialization may fail, and this
functionality allows the system to boot regardless of the failure.

MKTME needs a per-KeyID direct mapping. This requires a lot more virtual
address space, which may be a problem in 4-level paging mode. If the
system has more physical memory than we can handle with MKTME, this
allows us to fail MKTME, but still boot the system successfully.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/mktme.h | 2 ++
 arch/x86/kernel/cpu/intel.c  | 5 +----
 arch/x86/mm/mktme.c          | 9 +++++++++
 3 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/mktme.h b/arch/x86/include/asm/mktme.h
index 44409b8bbaca..ebbee6a0c495 100644
--- a/arch/x86/include/asm/mktme.h
+++ b/arch/x86/include/asm/mktme.h
@@ -6,6 +6,8 @@
 
 struct vm_area_struct;
 
+void mktme_disable(void);
+
 #ifdef CONFIG_X86_INTEL_MKTME
 extern phys_addr_t mktme_keyid_mask;
 extern int mktme_nr_keyids;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index efc9e9fc47d4..75e3b2602b4a 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -591,10 +591,7 @@ static void detect_tme(struct cpuinfo_x86 *c)
 		 * Maybe needed if there's inconsistent configuation
 		 * between CPUs.
 		 */
-		physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
-		mktme_keyid_mask = 0;
-		mktme_keyid_shift = 0;
-		mktme_nr_keyids = 0;
+		mktme_disable();
 	}
 #endif
 
diff --git a/arch/x86/mm/mktme.c b/arch/x86/mm/mktme.c
index 1194496633ce..bb6210dbcf0e 100644
--- a/arch/x86/mm/mktme.c
+++ b/arch/x86/mm/mktme.c
@@ -13,6 +13,15 @@ static inline bool mktme_enabled(void)
 	return static_branch_unlikely(&mktme_enabled_key);
 }
 
+void mktme_disable(void)
+{
+	physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
+	mktme_keyid_mask = 0;
+	mktme_keyid_shift = 0;
+	mktme_nr_keyids = 0;
+	static_branch_disable(&mktme_enabled_key);
+}
+
 int page_keyid(const struct page *page)
 {
 	if (!mktme_enabled())
-- 
2.18.0
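
For illustration only, a minimal sketch of how a later caller might use the
new helper when the per-KeyID direct mapping does not fit into the virtual
address space. The function mktme_init_direct_mapping() and the helper
mktme_direct_map_too_big() are assumptions made up for this sketch, not part
of this patch or of any existing kernel API:

	/*
	 * Hypothetical caller (not in this patch): bail out of MKTME
	 * setup when 4-level paging does not leave enough virtual
	 * address space for per-KeyID copies of the direct mapping.
	 */
	static int __init mktme_init_direct_mapping(void)
	{
		if (mktme_direct_map_too_big()) {	/* assumed helper */
			pr_err("x86/mktme: not enough virtual address space, disabling MKTME\n");
			mktme_disable();
			return -ENOMEM;
		}

		/* ... set up per-KeyID direct mappings ... */
		return 0;
	}

The point of the helper is that such a caller only needs a single call to
roll MKTME state back to "disabled" and boot the system without the feature.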