drm/i915/gtt: Rename i915_vm_is_48b to i915_vm_is_4lvl

Large ppGTTs are differentiated by the requirement to go to four levels
to address more than 32 bits. Given the introduction of more 4-level
ppGTTs with different numbers of address bits, rename i915_vm_is_48bit()
to i915_vm_is_4lvl() to better reflect the commonality of using 4
levels.

Based on a patch by Bob Paauwe.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Bob Paauwe <bob.j.paauwe@intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190314223839.28258-4-chris@chris-wilson.co.uk
Chris Wilson 2019-03-14 22:38:38 +00:00
parent 51d623b675
commit a9fe9ca44c
6 changed files with 21 additions and 26 deletions
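
Before the per-file hunks, a minimal, self-contained sketch of the predicate
being renamed (editorial illustration only: the toy_* names are invented, and
only the (total - 1) >> 32 test is taken from the patch). The point of the
rename is that the check keys off the size of the address space rather than a
literal 48 bits, so a 36b ppGTT needs four levels just as a 48b one does.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_address_space {
	uint64_t total;	/* size of the addressable range, in bytes */
};

/* 4-level paging is needed as soon as the last byte of the range cannot
 * be addressed with 32 bits, regardless of whether the range spans
 * 36b, 48b or more. */
static bool toy_vm_is_4lvl(const struct toy_address_space *vm)
{
	return (vm->total - 1) >> 32;
}

int main(void)
{
	const struct toy_address_space vm32 = { .total = 1ull << 32 };
	const struct toy_address_space vm36 = { .total = 1ull << 36 };
	const struct toy_address_space vm48 = { .total = 1ull << 48 };

	printf("32b ppGTT needs 4 levels? %d\n", toy_vm_is_4lvl(&vm32)); /* 0 */
	printf("36b ppGTT needs 4 levels? %d\n", toy_vm_is_4lvl(&vm36)); /* 1 */
	printf("48b ppGTT needs 4 levels? %d\n", toy_vm_is_4lvl(&vm48)); /* 1 */
	return 0;
}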


@@ -1101,9 +1101,9 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
if (i915_vm_is_48bit(&i915_ppgtt->vm))
if (i915_vm_is_4lvl(&i915_ppgtt->vm)) {
px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
else {
} else {
for (i = 0; i < GEN8_3LVL_PDPES; i++)
px_dma(i915_ppgtt->pdp.page_directory[i]) =
s->i915_context_pdps[i];
@@ -1154,7 +1154,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
if (i915_vm_is_48bit(&i915_ppgtt->vm))
if (i915_vm_is_4lvl(&i915_ppgtt->vm))
s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
else {
for (i = 0; i < GEN8_3LVL_PDPES; i++)


@@ -321,7 +321,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
address_mode = INTEL_LEGACY_32B_CONTEXT;
if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm))
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;


@@ -584,7 +584,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
* for all.
*/
size = I915_GTT_PAGE_SIZE_4K;
if (i915_vm_is_48bit(vm) &&
if (i915_vm_is_4lvl(vm) &&
HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
size = I915_GTT_PAGE_SIZE_64K;
gfp |= __GFP_NOWARN;
@@ -727,18 +727,13 @@ static void __pdp_fini(struct i915_page_directory_pointer *pdp)
pdp->page_directory = NULL;
}
static inline bool use_4lvl(const struct i915_address_space *vm)
{
return i915_vm_is_48bit(vm);
}
static struct i915_page_directory_pointer *
alloc_pdp(struct i915_address_space *vm)
{
struct i915_page_directory_pointer *pdp;
int ret = -ENOMEM;
GEM_BUG_ON(!use_4lvl(vm));
GEM_BUG_ON(!i915_vm_is_4lvl(vm));
pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
if (!pdp)
@@ -767,7 +762,7 @@ static void free_pdp(struct i915_address_space *vm,
{
__pdp_fini(pdp);
if (!use_4lvl(vm))
if (!i915_vm_is_4lvl(vm))
return;
cleanup_px(vm, pdp);
@@ -871,7 +866,7 @@ static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
gen8_ppgtt_pdpe_t *vaddr;
pdp->page_directory[pdpe] = pd;
if (!use_4lvl(vm))
if (!i915_vm_is_4lvl(vm))
return;
vaddr = kmap_atomic_px(pdp);
@@ -936,7 +931,7 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp;
unsigned int pml4e;
GEM_BUG_ON(!use_4lvl(vm));
GEM_BUG_ON(!i915_vm_is_4lvl(vm));
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
GEM_BUG_ON(pdp == vm->scratch_pdp);
@@ -1247,7 +1242,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
goto free_pt;
}
if (use_4lvl(vm)) {
if (i915_vm_is_4lvl(vm)) {
vm->scratch_pdp = alloc_pdp(vm);
if (IS_ERR(vm->scratch_pdp)) {
ret = PTR_ERR(vm->scratch_pdp);
@@ -1257,7 +1252,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
gen8_initialize_pt(vm, vm->scratch_pt);
gen8_initialize_pd(vm, vm->scratch_pd);
if (use_4lvl(vm))
if (i915_vm_is_4lvl(vm))
gen8_initialize_pdp(vm, vm->scratch_pdp);
return 0;
@@ -1279,7 +1274,7 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
enum vgt_g2v_type msg;
int i;
if (use_4lvl(vm)) {
if (i915_vm_is_4lvl(vm)) {
const u64 daddr = px_dma(&ppgtt->pml4);
I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
@@ -1309,7 +1304,7 @@ static void gen8_free_scratch(struct i915_address_space *vm)
if (!vm->scratch_page.daddr)
return;
if (use_4lvl(vm))
if (i915_vm_is_4lvl(vm))
free_pdp(vm, vm->scratch_pdp);
free_pd(vm, vm->scratch_pd);
free_pt(vm, vm->scratch_pt);
@@ -1355,7 +1350,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, false);
if (use_4lvl(vm))
if (i915_vm_is_4lvl(vm))
gen8_ppgtt_cleanup_4lvl(ppgtt);
else
gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
@@ -1555,7 +1550,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (err)
goto err_free;
if (use_4lvl(&ppgtt->vm)) {
if (i915_vm_is_4lvl(&ppgtt->vm)) {
err = setup_px(&ppgtt->vm, &ppgtt->pml4);
if (err)
goto err_scratch;


@@ -348,7 +348,7 @@ struct i915_address_space {
#define i915_is_ggtt(vm) ((vm)->is_ggtt)
static inline bool
i915_vm_is_48bit(const struct i915_address_space *vm)
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
return (vm->total - 1) >> 32;
}
@@ -488,7 +488,7 @@ static inline u32 gen6_pde_index(u32 addr)
static inline unsigned int
i915_pdpes_per_pdp(const struct i915_address_space *vm)
{
if (i915_vm_is_48bit(vm))
if (i915_vm_is_4lvl(vm))
return GEN8_PML4ES_PER_PML4;
return GEN8_3LVL_PDPES;
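
The header hunk above is the heart of the rename. As a hedged companion
sketch (toy names; the 512 and 4 values are assumptions standing in for
GEN8_PML4ES_PER_PML4 and GEN8_3LVL_PDPES, and the toy types repeat the earlier
sketch so this compiles on its own), here is how the number of PDP entries
exposed to callers follows from the same predicate:

#include <stdbool.h>
#include <stdint.h>

struct toy_address_space {
	uint64_t total;	/* size of the addressable range, in bytes */
};

/* Same test as the renamed i915_vm_is_4lvl(): more than 32 bits of
 * address space implies a fourth page-table level. */
static inline bool toy_vm_is_4lvl(const struct toy_address_space *vm)
{
	return (vm->total - 1) >> 32;
}

#define TOY_PML4ES_PER_PML4	512	/* assumed stand-in for GEN8_PML4ES_PER_PML4 */
#define TOY_3LVL_PDPES		4	/* assumed stand-in for GEN8_3LVL_PDPES */

/* A 4-level VM walks a full PML4 worth of PDP entries; a 3-level VM is
 * limited to the four PDP descriptors carried in the context image. */
static inline unsigned int toy_pdpes_per_pdp(const struct toy_address_space *vm)
{
	return toy_vm_is_4lvl(vm) ? TOY_PML4ES_PER_PML4 : TOY_3LVL_PDPES;
}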


@@ -1499,7 +1499,7 @@ static int execlists_request_alloc(struct i915_request *request)
*/
/* Unconditionally invalidate GPU caches and TLBs. */
if (i915_vm_is_48bit(&request->gem_context->ppgtt->vm))
if (i915_vm_is_4lvl(&request->gem_context->ppgtt->vm))
ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
else
ret = emit_pdps(request);
@@ -2719,7 +2719,7 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
if (i915_vm_is_48bit(&ppgtt->vm)) {
if (i915_vm_is_4lvl(&ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
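
To make the comment above concrete, a hedged sketch of the idea (toy
register-state struct and names, not the i915 helpers): with a 4-level ppGTT
only the PDP0 pair is loaded, carrying the PML4 base, while a 3-level ppGTT
programs all four PDP pairs with page-directory addresses.

#include <stdbool.h>
#include <stdint.h>

struct toy_pdp_pair {
	uint32_t udw;	/* upper dword of the descriptor */
	uint32_t ldw;	/* lower dword of the descriptor */
};

struct toy_reg_state {
	struct toy_pdp_pair pdp[4];
};

/* 4-level: PDP0 holds the PML4 base and the other descriptors are
 * ignored by the hardware.  3-level: each of the four descriptors
 * holds one page-directory address. */
void toy_init_pdp_regs(struct toy_reg_state *rs, bool is_4lvl,
		       const uint64_t *daddr)
{
	if (is_4lvl) {
		rs->pdp[0].udw = (uint32_t)(daddr[0] >> 32);
		rs->pdp[0].ldw = (uint32_t)daddr[0];
	} else {
		for (int i = 0; i < 4; i++) {
			rs->pdp[i].udw = (uint32_t)(daddr[i] >> 32);
			rs->pdp[i].ldw = (uint32_t)daddr[i];
		}
	}
}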


@@ -1449,7 +1449,7 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
if (!ppgtt || !i915_vm_is_48bit(&ppgtt->vm)) {
if (!ppgtt || !i915_vm_is_4lvl(&ppgtt->vm)) {
pr_info("48b PPGTT not supported, skipping\n");
return 0;
}
@@ -1719,7 +1719,7 @@ int i915_gem_huge_page_mock_selftests(void)
goto out_unlock;
}
if (!i915_vm_is_48bit(&ppgtt->vm)) {
if (!i915_vm_is_4lvl(&ppgtt->vm)) {
pr_err("failed to create 48b PPGTT\n");
err = -EINVAL;
goto out_close;