}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -799,8 +848,9 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
struct i915_page_directory_entry *pd;
uint64_t temp;
uint32_t pdpe;
+ size_t pdpes = I915_PDPES_PER_PDP(ppgtt->base.dev);
- BUG_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES));
+ BUG_ON(!bitmap_empty(new_pds, pdpes));
/* FIXME: PPGTT container_of won't work for 64b */
BUG_ON((start + length) > 0x800000000ULL);
@@ -820,18 +870,19 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
return 0;
unwind_out:
- for_each_set_bit(pdpe, new_pds, GEN8_LEGACY_PDPES)
+ for_each_set_bit(pdpe, new_pds, pdpes)
unmap_and_free_pd(pdp->page_directory[pdpe], ppgtt->base.dev);
return -ENOMEM;
}
static inline void
-free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long **new_pts)
+free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long **new_pts,
+ size_t pdpes)
{
int i;
- for (i = 0; i < GEN8_LEGACY_PDPES; i++)
+ for (i = 0; i < pdpes; i++)
kfree(new_pts[i]);
kfree(new_pts);
kfree(new_pds);
@@ -841,13 +892,14 @@ free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long **new_pts)
* of these are based on the number of PDPEs in the system.
*/
int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
- unsigned long ***new_pts)
+ unsigned long ***new_pts,
+ size_t pdpes)
{
int i;
unsigned long *pds;
unsigned long **pts;
- pds = kcalloc(BITS_TO_LONGS(GEN8_LEGACY_PDPES), sizeof(unsigned long), GFP_KERNEL);
+ pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_KERNEL);
if (!pds)
return -ENOMEM;
@@ -857,7 +909,7 @@ int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
return -ENOMEM;
}
- for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
+ for (i = 0; i < pdpes; i++) {
pts[i] = kcalloc(BITS_TO_LONGS(GEN8_PDES_PER_PAGE),
sizeof(unsigned long), GFP_KERNEL);
if (!pts[i])
@@ -870,7 +922,7 @@ int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
return 0;
err_out:
- free_gen8_temp_bitmaps(pds, pts);
+ free_gen8_temp_bitmaps(pds, pts, pdpes);
return -ENOMEM;
}
@@ -886,6 +938,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
const uint64_t orig_length = length;
uint64_t temp;
uint32_t pdpe;
+ size_t pdpes = I915_PDPES_PER_PDP(dev);
int ret;
#ifndef CONFIG_64BIT
@@ -903,7 +956,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
if (WARN_ON(start + length < start))
return -ERANGE;
- ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables);
+ ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
if (ret)
return ret;
@@ -911,7 +964,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
ret = gen8_ppgtt_alloc_page_directories(ppgtt, &ppgtt->pdp, start, length,
new_page_dirs);
if (ret) {
- free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+ free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
return ret;
}
@@ -968,7 +1021,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
set_bit(pdpe, ppgtt->pdp.used_pdpes);
}
- free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+ free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
return 0;
err_out:
@@ -977,13 +1030,19 @@ err_out:
unmap_and_free_pt(pd->page_tables[temp], vm->dev);
}
- for_each_set_bit(pdpe, new_page_dirs, GEN8_LEGACY_PDPES)
+ for_each_set_bit(pdpe, new_page_dirs, pdpes)
unmap_and_free_pd(ppgtt->pdp.page_directory[pdpe], vm->dev);
- free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+ free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
return ret;
}
+static void gen8_ppgtt_fini_common(struct i915_hw_ppgtt *ppgtt)
+{
+ unmap_and_free_pt(ppgtt->scratch_pd, ppgtt->base.dev);
+ unmap_and_free_pdp(&ppgtt->pdp, ppgtt->base.dev);
+}
+
/**
* GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
* with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -1004,6 +1063,15 @@ static int gen8_ppgtt_init_common(struct i915_hw_ppgtt *ppgtt, uint64_t size)
ppgtt->switch_mm = gen8_mm_switch;
+ if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+ int ret = __pdp_init(&ppgtt->pdp, false);
+ if (ret) {
+ unmap_and_free_pt(ppgtt->scratch_pd, ppgtt->base.dev);
+ return ret;
+ }
+	} else {
+		return -EPERM; /* Not yet implemented */
+	}
+
return 0;
}
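NB: __pdp_init() is introduced earlier in this patch and is not visible in
this excerpt. For context, a minimal sketch of what it is expected to do,
given the now-dynamic struct i915_page_directory_pointer_entry further
down in i915_gem_gtt.h (the body below is an illustration, not the
patch's actual code):

	static int __pdp_init(struct i915_page_directory_pointer_entry *pdp,
			      bool pml4e)
	{
		size_t pdpes = pml4e ? GEN8_PML4ES_PER_PML4 :
				       GEN8_LEGACY_PDPES;

		/* used_pdpes and page_directory used to be fixed-size
		 * members; they are now sized at runtime so a 48b PDP
		 * can hold 512 entries instead of the legacy 4. */
		pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
					  sizeof(unsigned long), GFP_KERNEL);
		if (!pdp->used_pdpes)
			return -ENOMEM;

		pdp->page_directory = kcalloc(pdpes,
					      sizeof(*pdp->page_directory),
					      GFP_KERNEL);
		if (!pdp->page_directory) {
			kfree(pdp->used_pdpes);
			return -ENOMEM;
		}

		return 0;
	}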
@@ -1025,7 +1093,7 @@ static int gen8_aliasing_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
* eventually. */
ret = gen8_alloc_va_range(&ppgtt->base, start, size);
if (ret) {
- unmap_and_free_pt(ppgtt->scratch_pd, ppgtt->base.dev);
+ gen8_ppgtt_fini_common(ppgtt);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index c68ec3a..a33c6e9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -85,8 +85,12 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
* The difference as compared to normal x86 3 level page table is the PDPEs are
* programmed via register.
*/
+#define GEN8_PML4ES_PER_PML4 512
+#define GEN8_PML4E_SHIFT 39
#define GEN8_PDPE_SHIFT 30
-#define GEN8_PDPE_MASK 0x3
+/* NB: GEN8_PDPE_MASK is too wide for 32b platforms (legacy PPGTT has only
+ * 4 PDPEs), but this has no impact there because the extra bits are never
+ * set. */
+#define GEN8_PDPE_MASK 0x1ff
#define GEN8_PDE_SHIFT 21
#define GEN8_PDE_MASK 0x1ff
#define GEN8_PTE_SHIFT 12
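NB: with these shifts a canonical 48b PPGTT address decomposes as
PML4E[47:39] -> PDPE[38:30] -> PDE[29:21] -> PTE[20:12]. A sketch of the
index helper this implies (gen8_pml4e_index() is hypothetical at this
point in the series; only the pdpe/pde/pte equivalents exist):

	static inline uint32_t gen8_pml4e_index(uint64_t address)
	{
		return (address >> GEN8_PML4E_SHIFT) &
			(GEN8_PML4ES_PER_PML4 - 1);
	}

For example, (3ULL << GEN8_PML4E_SHIFT) | (5ULL << GEN8_PDPE_SHIFT)
decodes to pml4e 3 and pdpe 5, with pde and pte both 0.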
@@ -95,6 +99,13 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
+#ifdef CONFIG_64BIT
+# define I915_PDPES_PER_PDP(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
+ GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
+#else
+# define I915_PDPES_PER_PDP(dev) GEN8_LEGACY_PDPES
+#endif
+
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
@@ -210,9 +221,17 @@ struct i915_page_directory_entry {
};
struct i915_page_directory_pointer_entry {
- /* struct page *page; */
- DECLARE_BITMAP(used_pdpes, GEN8_LEGACY_PDPES);
- struct i915_page_directory_entry *page_directory[GEN8_LEGACY_PDPES];
+ struct page *page;
+ dma_addr_t daddr;
+ unsigned long *used_pdpes;
+ struct i915_page_directory_entry **page_directory;
+};
+
+struct i915_pml4 {
+ struct page *page;
+ dma_addr_t daddr;
+ DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
+ struct i915_page_directory_pointer_entry *pdps[GEN8_PML4ES_PER_PML4];
};
struct i915_address_space {
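NB: the coverage arithmetic behind these structures: each PML4E maps
1ULL << GEN8_PML4E_SHIFT = 512GB, so the 512-entry PML4 spans the full
48b (256TB) address space, while the legacy 4-entry PDP spans only 4GB.
A compile-time sanity check along these lines (illustrative only, not in
the patch; it would need to sit in function scope, e.g. inside
gen8_ppgtt_init_common()) would be:

	BUILD_BUG_ON(GEN8_PML4ES_PER_PML4 *
		     (1ULL << GEN8_PML4E_SHIFT) != 1ULL << 48);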
@@ -302,8 +321,9 @@ struct i915_hw_ppgtt {
struct drm_mm_node node;
unsigned long pd_dirty_rings;
union {
- struct i915_page_directory_pointer_entry pdp;
- struct i915_page_directory_entry pd;
+ struct i915_pml4 pml4; /* GEN8+ & 64b PPGTT */
+ struct i915_page_directory_pointer_entry pdp; /* GEN8+ */
+ struct i915_page_directory_entry pd; /* GEN6-7 */
};
union {
@@ -399,14 +419,17 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
temp = min(temp, length), \
start += temp, length -= temp)
-#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
- for (iter = gen8_pdpe_index(start), pd = (pdp)->page_directory[iter]; \
- length > 0 && iter < GEN8_LEGACY_PDPES; \
+#define gen8_for_each_pdpe_e(pd, pdp, start, length, temp, iter, b) \
+ for (iter = gen8_pdpe_index(start), pd = (pdp)->page_directory[iter]; \
+ length > 0 && (iter < b); \
pd = (pdp)->page_directory[++iter], \
temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \
temp = min(temp, length), \
start += temp, length -= temp)
+#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
+ gen8_for_each_pdpe_e(pd, pdp, start, length, temp, iter, I915_PDPES_PER_PDP(dev))
+
/* Clamp length to the next page_directory boundary */
static inline uint64_t gen8_clamp_pd(uint64_t start, uint64_t length)
{
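NB: gen8_for_each_pdpe now expands I915_PDPES_PER_PDP(dev), which means it
silently requires a local variable named 'dev' to be in scope at every
call site. A hypothetical caller (for illustration only):

	struct drm_device *dev = ppgtt->base.dev;	/* used by the macro */
	struct i915_page_directory_entry *pd;
	uint64_t temp;
	uint32_t pdpe;

	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
		/* pd visits each page directory slot covering
		 * [start, start + length) */
	}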
--
2.1.1