Re: [PATCH v7 01/12] KVM: arm64: Rename free_removed to free_unlinked

On 4/9/23 2:29 PM, Ricardo Koller wrote:
Normalize on referring to tables outside of an active paging structure
as 'unlinked'.

A subsequent change to KVM will add support for building page tables
that are not part of an active paging structure. The existing
'removed_table' terminology is quite clunky when applied in this
context.

Signed-off-by: Ricardo Koller <ricarkol@xxxxxxxxxx>
Reviewed-by: Oliver Upton <oliver.upton@xxxxxxxxx>
Reviewed-by: Shaoqin Huang <shahuang@xxxxxxxxxx>
---
  arch/arm64/include/asm/kvm_pgtable.h  |  8 ++++----
  arch/arm64/kvm/hyp/nvhe/mem_protect.c |  6 +++---
  arch/arm64/kvm/hyp/pgtable.c          |  6 +++---
  arch/arm64/kvm/mmu.c                  | 10 +++++-----
  4 files changed, 15 insertions(+), 15 deletions(-)


Reviewed-by: Gavin Shan <gshan@xxxxxxxxxx>
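
Purely as an illustration of the new naming (not part of the patch; the my_* identifiers below are invented, only the kvm_pgtable_* ones come from this series), an mm_ops user would now look roughly like this, mirroring host_s2_free_unlinked_table() and stage2_free_unlinked_table() further down:

#include <asm/kvm_pgtable.h>

/* Forward (tentative) definition, same pattern as kvm_s2_mm_ops in mmu.c. */
static struct kvm_pgtable_mm_ops my_mm_ops;

static void my_free_unlinked_table(void *addr, u32 level)
{
	/* The subtree must already be unreachable by any hardware walker. */
	kvm_pgtable_stage2_free_unlinked(&my_mm_ops, addr, level);
}

static struct kvm_pgtable_mm_ops my_mm_ops = {
	.free_unlinked_table	= my_free_unlinked_table,
	/* zalloc_page, get_page, put_page, etc. omitted for brevity */
};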

diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 4cd6762bda805..26a4293726c14 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -104,7 +104,7 @@ static inline bool kvm_level_supports_block_mapping(u32 level)
   *				allocation is physically contiguous.
   * @free_pages_exact:		Free an exact number of memory pages previously
   *				allocated by zalloc_pages_exact.
- * @free_removed_table:		Free a removed paging structure by unlinking and
+ * @free_unlinked_table:	Free an unlinked paging structure by unlinking and
   *				dropping references.
   * @get_page:			Increment the refcount on a page.
   * @put_page:			Decrement the refcount on a page. When the
@@ -124,7 +124,7 @@ struct kvm_pgtable_mm_ops {
  	void*		(*zalloc_page)(void *arg);
  	void*		(*zalloc_pages_exact)(size_t size);
  	void		(*free_pages_exact)(void *addr, size_t size);
-	void		(*free_removed_table)(void *addr, u32 level);
+	void		(*free_unlinked_table)(void *addr, u32 level);
  	void		(*get_page)(void *addr);
  	void		(*put_page)(void *addr);
  	int		(*page_count)(void *addr);
@@ -440,7 +440,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
  void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
/**
- * kvm_pgtable_stage2_free_removed() - Free a removed stage-2 paging structure.
+ * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
   * @mm_ops:	Memory management callbacks.
   * @pgtable:	Unlinked stage-2 paging structure to be freed.
   * @level:	Level of the stage-2 paging structure to be freed.
@@ -448,7 +448,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
   * The page-table is assumed to be unreachable by any hardware walkers prior to
   * freeing and therefore no TLB invalidation is performed.
   */
-void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level);
+void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level);
/**
   * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 552653fa18be3..b030170d803b6 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -91,9 +91,9 @@ static void host_s2_put_page(void *addr)
  	hyp_put_page(&host_s2_pool, addr);
  }
-static void host_s2_free_removed_table(void *addr, u32 level)
+static void host_s2_free_unlinked_table(void *addr, u32 level)
  {
-	kvm_pgtable_stage2_free_removed(&host_mmu.mm_ops, addr, level);
+	kvm_pgtable_stage2_free_unlinked(&host_mmu.mm_ops, addr, level);
  }
static int prepare_s2_pool(void *pgt_pool_base)
@@ -110,7 +110,7 @@ static int prepare_s2_pool(void *pgt_pool_base)
  	host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) {
  		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
  		.zalloc_page = host_s2_zalloc_page,
-		.free_removed_table = host_s2_free_removed_table,
+		.free_unlinked_table = host_s2_free_unlinked_table,
  		.phys_to_virt = hyp_phys_to_virt,
  		.virt_to_phys = hyp_virt_to_phys,
  		.page_count = hyp_page_count,
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 3d61bd3e591d2..a3246d6cddec7 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -860,7 +860,7 @@ static int stage2_map_walk_table_pre(const struct kvm_pgtable_visit_ctx *ctx,
  	if (ret)
  		return ret;
-	mm_ops->free_removed_table(childp, ctx->level);
+	mm_ops->free_unlinked_table(childp, ctx->level);
  	return 0;
  }
@@ -905,7 +905,7 @@ static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
   * The TABLE_PRE callback runs for table entries on the way down, looking
   * for table entries which we could conceivably replace with a block entry
   * for this mapping. If it finds one it replaces the entry and calls
- * kvm_pgtable_mm_ops::free_removed_table() to tear down the detached table.
+ * kvm_pgtable_mm_ops::free_unlinked_table() to tear down the detached table.
   *
   * Otherwise, the LEAF callback performs the mapping at the existing leaves
   * instead.
@@ -1276,7 +1276,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
  	pgt->pgd = NULL;
  }
-void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level)
+void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level)
  {
  	kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
  	struct kvm_pgtable_walker walker = {
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 7113587222ffe..efdaab3f154de 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -131,21 +131,21 @@ static void kvm_s2_free_pages_exact(void *virt, size_t size)
static struct kvm_pgtable_mm_ops kvm_s2_mm_ops;

-static void stage2_free_removed_table_rcu_cb(struct rcu_head *head)
+static void stage2_free_unlinked_table_rcu_cb(struct rcu_head *head)
  {
  	struct page *page = container_of(head, struct page, rcu_head);
  	void *pgtable = page_to_virt(page);
  	u32 level = page_private(page);
-	kvm_pgtable_stage2_free_removed(&kvm_s2_mm_ops, pgtable, level);
+	kvm_pgtable_stage2_free_unlinked(&kvm_s2_mm_ops, pgtable, level);
  }
-static void stage2_free_removed_table(void *addr, u32 level)
+static void stage2_free_unlinked_table(void *addr, u32 level)
  {
  	struct page *page = virt_to_page(addr);
  	set_page_private(page, (unsigned long)level);
-	call_rcu(&page->rcu_head, stage2_free_removed_table_rcu_cb);
+	call_rcu(&page->rcu_head, stage2_free_unlinked_table_rcu_cb);
  }
static void kvm_host_get_page(void *addr)
@@ -682,7 +682,7 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
  	.zalloc_page		= stage2_memcache_zalloc_page,
  	.zalloc_pages_exact	= kvm_s2_zalloc_pages_exact,
  	.free_pages_exact	= kvm_s2_free_pages_exact,
-	.free_removed_table	= stage2_free_removed_table,
+	.free_unlinked_table	= stage2_free_unlinked_table,
  	.get_page		= kvm_host_get_page,
  	.put_page		= kvm_s2_put_page,
  	.page_count		= kvm_host_page_count,
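
To restate the contract the updated kerneldoc and the TABLE_PRE walker comment describe, here is a rough, invented sketch (the my_* name is not in the tree) of the caller-side pattern stage2_map_walk_table_pre() follows: the stale table entry is replaced and any required TLB invalidation is done before the now-unlinked subtree is handed to ->free_unlinked_table(), since kvm_pgtable_stage2_free_unlinked() itself performs no TLB invalidation:

#include <asm/kvm_pgtable.h>

static int my_collapse_to_block(const struct kvm_pgtable_visit_ctx *ctx,
				struct kvm_pgtable_mm_ops *mm_ops,
				void *childp)
{
	/*
	 * 1) Install the replacement block entry and invalidate TLBs so the
	 *    old subtree becomes unreachable by hardware walkers (elided
	 *    here; stage2_map_walk_table_pre() does this before the call
	 *    below).
	 */

	/* 2) Only then tear down the unlinked subtree. */
	mm_ops->free_unlinked_table(childp, ctx->level);
	return 0;
}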
