
drm/i915: Add support for mapping an object page by page

Introduce a new vm-specific callback insert_page() to program a single pte in
the ggtt or ppgtt. This allows us to map a single page into the mappable
aperture space; by iterating over it, the whole object can be accessed while
consuming no more aperture space than a single page.
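
To illustrate the intended usage pattern (a sketch only, not part of this
patch): a caller that has reserved a single PAGE_SIZE node in the GGTT can
repoint that one PTE at successive backing pages of an object. The function
name, the pre-reserved 'node' and the get_obj_dma_addr() helper below are
hypothetical stand-ins; only the insert_page() callback itself comes from
this patch.

        /*
         * Illustrative sketch only: walk an object's backing pages through
         * one pre-reserved PAGE_SIZE slot of the mappable aperture.
         * 'node' is assumed to be a drm_mm_node already reserved in the GGTT,
         * and get_obj_dma_addr() is a hypothetical helper returning the DMA
         * address of page 'n' of the object.
         */
        static void access_obj_page_by_page(struct i915_ggtt *ggtt,
                                            struct drm_i915_gem_object *obj,
                                            struct drm_mm_node *node,
                                            unsigned int npages)
        {
                unsigned int n;

                for (n = 0; n < npages; n++) {
                        /* Repoint the single reserved PTE at page 'n'. */
                        ggtt->base.insert_page(&ggtt->base,
                                               get_obj_dma_addr(obj, n),
                                               node->start,
                                               I915_CACHE_NONE, 0);
                        /* Per v5, any required write barrier is the caller's job. */
                        wmb();

                        /* CPU access via the aperture at node->start goes here. */
                }
        }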

v2: Added low level rpm assertions to insert_page routines (Chris)

v3: Added POSTING_READ post register write (Tvrtko)

v4: Rebase (Ankit)

v5: Removed wmb() and FLUSH_CTL from insert_page, caller to take care
of it (Chris)

v6: insert_page not working correctly without FLSH_CNTL write, added the
write again.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Ankitprasad Sharma <ankitprasad.r.sharma@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Chris Wilson 2016-06-10 14:22:59 +05:30 committed by Tvrtko Ursulin
parent 4e50f79622
commit d6473f5664
4 changed files with 81 additions and 1 deletion


@@ -840,6 +840,14 @@ static bool i830_check_flags(unsigned int flags)
        return false;
}

void intel_gtt_insert_page(dma_addr_t addr,
                           unsigned int pg,
                           unsigned int flags)
{
        intel_private.driver->write_entry(addr, pg, flags);
}
EXPORT_SYMBOL(intel_gtt_insert_page);

void intel_gtt_insert_sg_entries(struct sg_table *st,
                                 unsigned int pg_start,
                                 unsigned int flags)


@@ -2355,6 +2355,28 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
#endif
}

static void gen8_ggtt_insert_page(struct i915_address_space *vm,
                                  dma_addr_t addr,
                                  uint64_t offset,
                                  enum i915_cache_level level,
                                  u32 unused)
{
        struct drm_i915_private *dev_priv = to_i915(vm->dev);
        gen8_pte_t __iomem *pte =
                (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
                (offset >> PAGE_SHIFT);
        int rpm_atomic_seq;

        rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

        gen8_set_pte(pte, gen8_pte_encode(addr, level, true));

        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        POSTING_READ(GFX_FLSH_CNTL_GEN6);

        assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *st,
                                     uint64_t start,
@@ -2424,6 +2446,28 @@ static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
        stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
}

static void gen6_ggtt_insert_page(struct i915_address_space *vm,
                                  dma_addr_t addr,
                                  uint64_t offset,
                                  enum i915_cache_level level,
                                  u32 flags)
{
        struct drm_i915_private *dev_priv = to_i915(vm->dev);
        gen6_pte_t __iomem *pte =
                (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
                (offset >> PAGE_SHIFT);
        int rpm_atomic_seq;

        rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

        iowrite32(vm->pte_encode(addr, level, true, flags), pte);

        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        POSTING_READ(GFX_FLSH_CNTL_GEN6);

        assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
@@ -2543,6 +2587,24 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
        assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

static void i915_ggtt_insert_page(struct i915_address_space *vm,
                                  dma_addr_t addr,
                                  uint64_t offset,
                                  enum i915_cache_level cache_level,
                                  u32 unused)
{
        struct drm_i915_private *dev_priv = to_i915(vm->dev);
        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
                AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
        int rpm_atomic_seq;

        rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

        intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);

        assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *pages,
                                     uint64_t start,
@@ -3076,7 +3138,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
        ggtt->base.bind_vma = ggtt_bind_vma;
        ggtt->base.unbind_vma = ggtt_unbind_vma;
        ggtt->base.insert_page = gen8_ggtt_insert_page;
        ggtt->base.clear_range = nop_clear_range;
        if (!USES_FULL_PPGTT(dev_priv))
                ggtt->base.clear_range = gen8_ggtt_clear_range;
@@ -3116,6 +3178,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
        ret = ggtt_probe_common(dev, ggtt->size);

        ggtt->base.clear_range = gen6_ggtt_clear_range;
        ggtt->base.insert_page = gen6_ggtt_insert_page;
        ggtt->base.insert_entries = gen6_ggtt_insert_entries;
        ggtt->base.bind_vma = ggtt_bind_vma;
        ggtt->base.unbind_vma = ggtt_unbind_vma;
@@ -3147,6 +3210,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
                      &ggtt->mappable_base, &ggtt->mappable_end);

        ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev);
        ggtt->base.insert_page = i915_ggtt_insert_page;
        ggtt->base.insert_entries = i915_ggtt_insert_entries;
        ggtt->base.clear_range = i915_ggtt_clear_range;
        ggtt->base.bind_vma = ggtt_bind_vma;


@@ -319,6 +319,11 @@ struct i915_address_space {
                            uint64_t start,
                            uint64_t length,
                            bool use_scratch);
        void (*insert_page)(struct i915_address_space *vm,
                            dma_addr_t addr,
                            uint64_t offset,
                            enum i915_cache_level cache_level,
                            u32 flags);
        void (*insert_entries)(struct i915_address_space *vm,
                               struct sg_table *st,
                               uint64_t start,


@@ -13,6 +13,9 @@ void intel_gmch_remove(void);
bool intel_enable_gtt(void);

void intel_gtt_chipset_flush(void);
void intel_gtt_insert_page(dma_addr_t addr,
                           unsigned int pg,
                           unsigned int flags);
void intel_gtt_insert_sg_entries(struct sg_table *st,
                                 unsigned int pg_start,
                                 unsigned int flags);