Stephen Zhang <stephenzhangzsd@xxxxxxxxx> writes:

> If the name of this function changes, you can easily
> forget to modify the code in the corresponding place.
> In fact, such errors already exist in spte_write_protect
> and spte_clear_dirty.
>

What if we do something like (completely untested):

diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index bfc6389edc28..5ec15e4160b1 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -12,7 +12,7 @@ extern bool dbg;
 
 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
-#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
+#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0)
 #define MMU_WARN_ON(x) WARN_ON(x)
 #else
 #define pgprintk(x...) do { } while (0)

and eliminate the need to pass '__func__,' explicitly? We can probably
do the same to pgprintk().

> Signed-off-by: Stephen Zhang <stephenzhangzsd@xxxxxxxxx>
> ---
>  arch/x86/kvm/mmu/mmu.c | 16 ++++++++--------
>  1 file changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 6d16481..09462c3d 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -844,17 +844,17 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
>  	int i, count = 0;
>
>  	if (!rmap_head->val) {
> -		rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
> +		rmap_printk("%s: %p %llx 0->1\n", __func__, spte, *spte);
>  		rmap_head->val = (unsigned long)spte;
>  	} else if (!(rmap_head->val & 1)) {
> -		rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
> +		rmap_printk("%s: %p %llx 1->many\n", __func__, spte, *spte);
>  		desc = mmu_alloc_pte_list_desc(vcpu);
>  		desc->sptes[0] = (u64 *)rmap_head->val;
>  		desc->sptes[1] = spte;
>  		rmap_head->val = (unsigned long)desc | 1;
>  		++count;
>  	} else {
> -		rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
> +		rmap_printk("%s: %p %llx many->many\n", __func__, spte, *spte);
>  		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
>  		while (desc->sptes[PTE_LIST_EXT-1]) {
>  			count += PTE_LIST_EXT;
> @@ -1115,7 +1115,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
>  	    !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
>  		return false;
>
> -	rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
> +	rmap_printk("%s: spte %p %llx\n", __func__, sptep, *sptep);
>
>  	if (pt_protect)
>  		spte &= ~SPTE_MMU_WRITEABLE;
> @@ -1142,7 +1142,7 @@ static bool spte_clear_dirty(u64 *sptep)
>  {
>  	u64 spte = *sptep;
>
> -	rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
> +	rmap_printk("%s: spte %p %llx\n", __func__, sptep, *sptep);
>
>  	MMU_WARN_ON(!spte_ad_enabled(spte));
>  	spte &= ~shadow_dirty_mask;
> @@ -1184,7 +1184,7 @@ static bool spte_set_dirty(u64 *sptep)
>  {
>  	u64 spte = *sptep;
>
> -	rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
> +	rmap_printk("%s: spte %p %llx\n", __func__, sptep, *sptep);
>
>  	/*
>  	 * Similar to the !kvm_x86_ops.slot_disable_log_dirty case,
> @@ -1363,8 +1363,8 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
>
> restart:
>  	for_each_rmap_spte(rmap_head, &iter, sptep) {
> -		rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
> -			    sptep, *sptep, gfn, level);
> +		rmap_printk("%s: spte %p %llx gfn %llx (%d)\n",
> +			    __func__, sptep, *sptep, gfn, level);
>
>  		need_flush = 1;

-- 
Vitaly
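
For readers who have not seen the named-variadic form before: the suggested
macro relies on the GNU "args..." / "## args" extension (which the kernel
already uses elsewhere) so that the trailing comma is dropped when the format
string is the only argument. Below is a minimal, purely illustrative userspace
sketch of the same pattern, with printf() standing in for printk(); the
function names and values are made up for the demo and are not from the patch.

#include <stdio.h>

static int dbg = 1;

/*
 * Same shape as the proposed rmap_printk(): prepend the calling function's
 * name via __func__ and forward the remaining arguments.  The GNU "## args"
 * extension swallows the trailing comma when no extra arguments are passed.
 */
#define rmap_printk(fmt, args...) \
        do { if (dbg) printf("%s: " fmt, __func__, ## args); } while (0)

static void pte_list_add_demo(unsigned long long *spte)
{
        rmap_printk("%p %llx 0->1\n", (void *)spte, *spte);
}

int main(void)
{
        unsigned long long spte = 0xabcd;

        pte_list_add_demo(&spte);       /* "pte_list_add_demo: 0x... abcd 0->1" */
        rmap_printk("no extra args\n"); /* "main: no extra args" */
        return 0;
}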