From: Xiaoyao Li <xiaoyao.li@xxxxxxxxx>

tdh_mem_page_aug() will support 2MB large pages in the near future.
The cache flush then also needs to cover 2MB instead of 4KB.  Introduce
a helper function that flushes the cache based on the page size, in
preparation for large pages.

Signed-off-by: Xiaoyao Li <xiaoyao.li@xxxxxxxxx>
Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
Reviewed-by: Binbin Wu <binbin.wu@xxxxxxxxxxxxxxx>
---
v6:
- Catch up with the tdx_seamcall() change
---
 arch/x86/kvm/vmx/tdx_ops.h | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/vmx/tdx_ops.h b/arch/x86/kvm/vmx/tdx_ops.h
index d27f281152cb..3af124711e98 100644
--- a/arch/x86/kvm/vmx/tdx_ops.h
+++ b/arch/x86/kvm/vmx/tdx_ops.h
@@ -6,6 +6,7 @@
 
 #include <linux/compiler.h>
 
+#include <asm/pgtable_types.h>
 #include <asm/cacheflush.h>
 #include <asm/asm.h>
 #include <asm/kvm_host.h>
@@ -50,6 +51,11 @@ static inline int pg_level_to_tdx_sept_level(enum pg_level level)
 	return level - 1;
 }
 
+static inline void tdx_clflush_page(hpa_t addr, enum pg_level level)
+{
+	clflush_cache_range(__va(addr), KVM_HPAGE_SIZE(level));
+}
+
 /*
  * TDX module acquires its internal lock for resources. It doesn't spin to get
  * locks because of its restrictions of allowed execution time. Instead, it
@@ -87,7 +93,7 @@ static inline u64 tdh_mng_addcx(hpa_t tdr, hpa_t addr)
 		.rdx = tdr,
 	};
 
-	clflush_cache_range(__va(addr), PAGE_SIZE);
+	tdx_clflush_page(addr, PG_LEVEL_4K);
 	return tdx_seamcall(TDH_MNG_ADDCX, &in, NULL);
 }
 
@@ -101,7 +107,7 @@ static inline u64 tdh_mem_page_add(hpa_t tdr, gpa_t gpa, hpa_t hpa, hpa_t source
 		.r9 = source,
 	};
 
-	clflush_cache_range(__va(hpa), PAGE_SIZE);
+	tdx_clflush_page(hpa, PG_LEVEL_4K);
 	return tdx_seamcall_sept(TDH_MEM_PAGE_ADD, &in, out);
 }
 
@@ -114,7 +120,7 @@ static inline u64 tdh_mem_sept_add(hpa_t tdr, gpa_t gpa, int level, hpa_t page,
 		.r8 = page,
 	};
 
-	clflush_cache_range(__va(page), PAGE_SIZE);
+	tdx_clflush_page(page, PG_LEVEL_4K);
 	return tdx_seamcall_sept(TDH_MEM_SEPT_ADD, &in, out);
 }
 
@@ -147,7 +153,7 @@ static inline u64 tdh_vp_addcx(hpa_t tdvpr, hpa_t addr)
 		.rdx = tdvpr,
 	};
 
-	clflush_cache_range(__va(addr), PAGE_SIZE);
+	tdx_clflush_page(addr, PG_LEVEL_4K);
 	return tdx_seamcall(TDH_VP_ADDCX, &in, NULL);
 }
 
@@ -160,7 +166,7 @@ static inline u64 tdh_mem_page_relocate(hpa_t tdr, gpa_t gpa, hpa_t hpa,
 		.r8 = hpa,
 	};
 
-	clflush_cache_range(__va(hpa), PAGE_SIZE);
+	tdx_clflush_page(hpa, PG_LEVEL_4K);
 	return tdx_seamcall_sept(TDH_MEM_PAGE_RELOCATE, &in, out);
 }
 
@@ -173,7 +179,7 @@ static inline u64 tdh_mem_page_aug(hpa_t tdr, gpa_t gpa, hpa_t hpa,
 		.r8 = hpa,
 	};
 
-	clflush_cache_range(__va(hpa), PAGE_SIZE);
+	tdx_clflush_page(hpa, PG_LEVEL_4K);
 	return tdx_seamcall_sept(TDH_MEM_PAGE_AUG, &in, out);
 }
 
@@ -204,7 +210,7 @@ static inline u64 tdh_mng_create(hpa_t tdr, int hkid)
 		.rdx = hkid,
 	};
 
-	clflush_cache_range(__va(tdr), PAGE_SIZE);
+	tdx_clflush_page(tdr, PG_LEVEL_4K);
 	return tdx_seamcall(TDH_MNG_CREATE, &in, NULL);
 }
 
@@ -215,7 +221,7 @@ static inline u64 tdh_vp_create(hpa_t tdr, hpa_t tdvpr)
 		.rdx = tdr,
 	};
 
-	clflush_cache_range(__va(tdvpr), PAGE_SIZE);
+	tdx_clflush_page(tdvpr, PG_LEVEL_4K);
 	return tdx_seamcall(TDH_VP_CREATE, &in, NULL);
 }
-- 
2.25.1
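
For context, a sketch of how the helper is expected to be used once
tdh_mem_page_aug() takes a mapping level.  This is an assumption about
the planned large-page follow-up, not code from this series; the caller
name is hypothetical, and the level conversion is simply the inverse of
pg_level_to_tdx_sept_level() above:

/*
 * Hypothetical future caller (not part of this patch): flush the whole
 * mapping before TDH.MEM.PAGE.AUG instead of a fixed 4KB.
 */
static void tdx_clflush_for_aug(hpa_t hpa, int tdx_sept_level)
{
	/* Inverse of pg_level_to_tdx_sept_level(): TDX level 0 == PG_LEVEL_4K. */
	enum pg_level level = tdx_sept_level + 1;

	/* KVM_HPAGE_SIZE(PG_LEVEL_2M) is 2MB, so a 2MB aug flushes all 2MB. */
	tdx_clflush_page(hpa, level);
}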