Add a description for the kAPI functions inside intel_tlb.c.

Signed-off-by: Mauro Carvalho Chehab <mchehab@xxxxxxxxxx>
---

To avoid mailbombing on a large number of people, only mailing lists were Cc'd
on the cover. See [PATCH v2 00/21] at:
https://lore.kernel.org/all/cover.1657800199.git.mchehab@xxxxxxxxxx/

 drivers/gpu/drm/i915/gt/intel_tlb.c | 36 +++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c b/drivers/gpu/drm/i915/gt/intel_tlb.c
index 15ed83226676..aa2e0086ae88 100644
--- a/drivers/gpu/drm/i915/gt/intel_tlb.c
+++ b/drivers/gpu/drm/i915/gt/intel_tlb.c
@@ -146,6 +146,18 @@ static void mmio_invalidate_full(struct intel_gt *gt)
 	intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
 }

+/**
+ * intel_gt_invalidate_tlb_full - do full TLB cache invalidation
+ * @gt: GT structure
+ * @seqno: sequence number
+ *
+ * Do a full TLB cache invalidation if @seqno is newer than the seqno of
+ * the last full TLB cache invalidation.
+ *
+ * Note:
+ * The TLB cache invalidation logic depends on GEN-specific registers.
+ * It currently supports GEN8 to GEN12 and GuC-based TLB cache invalidation.
+ */
 void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
 {
 	intel_wakeref_t wakeref;
@@ -220,6 +232,17 @@ static bool mmio_invalidate_range(struct intel_gt *gt, u64 start, u64 length)
 	return err == 0;
 }

+/**
+ * intel_gt_invalidate_tlb_range - do selective TLB cache invalidation
+ * @gt: GT structure
+ * @start: range start
+ * @length: range length
+ *
+ * Do a selective TLB cache invalidation on the range that starts at @start
+ * and has @length size.
+ *
+ * Only some GuC-based GPUs can do a selective cache invalidation.
+ */
 bool intel_gt_invalidate_tlb_range(struct intel_gt *gt,
 				   u64 start, u64 length)
 {
@@ -247,12 +270,25 @@ bool intel_gt_invalidate_tlb_range(struct intel_gt *gt,
 	return true;
 }

+/**
+ * intel_gt_init_tlb - initialize TLB-specific vars
+ * @gt: GT structure
+ *
+ * The TLB cache invalidation logic internally uses some resources that require
+ * initialization. It should be called before doing any TLB cache invalidation.
+ */
 void intel_gt_init_tlb(struct intel_gt *gt)
 {
 	mutex_init(&gt->tlb.invalidate_lock);
 	seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
 }

+/**
+ * intel_gt_fini_tlb - free TLB-specific vars
+ * @gt: GT structure
+ *
+ * Frees any resources used by the TLB cache invalidation logic.
+ */
 void intel_gt_fini_tlb(struct intel_gt *gt)
 {
 	mutex_destroy(&gt->tlb.invalidate_lock);
-- 
2.36.1
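
For reference, a hypothetical caller sketch (not part of the patch) showing the
call order implied by the kerneldoc above. The wrapper names, the way @seqno is
obtained and the include paths are illustrative assumptions, not existing i915
code; only the four intel_gt_*_tlb* calls come from the patch itself:

/* Hypothetical sketch only -- illustrates the documented kAPI call order. */
#include "intel_gt.h"	/* assumed include paths, as seen from within gt/ */
#include "intel_tlb.h"

static void example_gt_lifecycle(struct intel_gt *gt)
{
	/* Set up the mutex/seqcount before any invalidation is issued. */
	intel_gt_init_tlb(gt);

	/* ... GT runtime: invalidations happen in between ... */

	/* Release the resources once no further invalidation can occur. */
	intel_gt_fini_tlb(gt);
}

static void example_flush_range(struct intel_gt *gt, u64 start, u64 length,
				u32 seqno)
{
	/*
	 * Prefer a selective invalidation of [start, start + length);
	 * only some GuC-based platforms support it.
	 */
	if (intel_gt_invalidate_tlb_range(gt, start, length))
		return;

	/*
	 * Fall back to a full invalidation. @seqno (assumed here to be
	 * obtained by the caller from the GT's TLB seqcount) lets the core
	 * skip the flush if a newer full invalidation has already been done.
	 */
	intel_gt_invalidate_tlb_full(gt, seqno);
}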