From: Miaohe Lin <linmiaohe@xxxxxxxxxx>

Fix some writing mistakes in the comments. Also, mmu_check_roots is a
typo for mmu_check_root.

Reviewed-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
Signed-off-by: Miaohe Lin <linmiaohe@xxxxxxxxxx>
---
 virt/kvm/kvm_main.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3aa21bec028d..94ec01af708b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -964,7 +964,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 
 	/*
 	 * Increment the new memslot generation a second time, dropping the
-	 * update in-progress flag and incrementing then generation based on
+	 * update in-progress flag and incrementing the generation based on
 	 * the number of address spaces. This provides a unique and easily
 	 * identifiable generation number while the memslots are in flux.
 	 */
@@ -1117,7 +1117,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		 *
 		 * validation of sp->gfn happens in:
 		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
-		 *	- kvm_is_visible_gfn (mmu_check_roots)
+		 *	- kvm_is_visible_gfn (mmu_check_root)
 		 */
 		kvm_arch_flush_shadow_memslot(kvm, slot);
 
@@ -1519,7 +1519,7 @@ static inline int check_user_page_hwpoison(unsigned long addr)
 /*
  * The fast path to get the writable pfn which will be stored in @pfn,
  * true indicates success, otherwise false is returned. It's also the
- * only part that runs if we can are in atomic context.
+ * only part that runs if we are in atomic context.
  */
 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
 			    bool *writable, kvm_pfn_t *pfn)
-- 
2.19.1