drm/i915: switch intel_uncore_forcewake_for_reg to intel_uncore

The intel_uncore structure is the owner of forcewake (FW), so make
intel_uncore_forcewake_for_reg operate on it instead of on dev_priv.

While at it, use a local uncore variable and switch to the new
read/write functions where it makes sense.

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190325214940.23632-7-daniele.ceraolospurio@intel.com
Daniele Ceraolo Spurio 2019-03-25 14:49:37 -07:00 committed by Chris Wilson
parent a2b4abfc62
commit 4319382e9b
8 changed files with 47 additions and 44 deletions
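
In outline, the conversion at a typical call site looks like the sketch below. This is an illustrative fragment distilled from the hunks that follow (dev_priv, reg and fw stand in for whatever the real caller already has in scope), not code lifted verbatim from any one file:

	/* Before: the helper took the whole device struct, and raw access
	 * went through the I915_*_FW() macros.
	 */
	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
					    FW_REG_READ | FW_REG_WRITE);
	intel_uncore_forcewake_get(&dev_priv->uncore, fw);
	I915_WRITE_FW(reg, 0x1);
	intel_uncore_forcewake_put(&dev_priv->uncore, fw);

	/* After: grab a local uncore pointer once and use it consistently,
	 * including for the raw intel_uncore_*_fw() accessors.
	 */
	struct intel_uncore *uncore = &dev_priv->uncore;

	fw = intel_uncore_forcewake_for_reg(uncore, reg,
					    FW_REG_READ | FW_REG_WRITE);
	intel_uncore_forcewake_get(uncore, fw);
	intel_uncore_write_fw(uncore, reg, 0x1);
	intel_uncore_forcewake_put(uncore, fw);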

View File

@@ -327,6 +327,7 @@ out:
 static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	enum forcewake_domains fw;
 	i915_reg_t reg;
@@ -351,21 +352,21 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	 * otherwise device can go to RC6 state and interrupt invalidation
 	 * process
 	 */
-	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+	fw = intel_uncore_forcewake_for_reg(uncore, reg,
 					    FW_REG_READ | FW_REG_WRITE);
 	if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
 		fw |= FORCEWAKE_RENDER;
-	intel_uncore_forcewake_get(&dev_priv->uncore, fw);
+	intel_uncore_forcewake_get(uncore, fw);
-	I915_WRITE_FW(reg, 0x1);
+	intel_uncore_write_fw(uncore, reg, 0x1);
-	if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
+	if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50))
 		gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
 	else
 		vgpu_vreg_t(vgpu, reg) = 0;
-	intel_uncore_forcewake_put(&dev_priv->uncore, fw);
+	intel_uncore_forcewake_put(uncore, fw);
 	gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
 }

View File

@@ -888,6 +888,7 @@ static inline u32
 read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
 		  int subslice, i915_reg_t reg)
 {
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	u32 mcr_slice_subslice_mask;
 	u32 mcr_slice_subslice_select;
 	u32 default_mcr_s_ss_select;
@@ -909,33 +910,33 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
 	default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);
-	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
+	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
 						    FW_REG_READ);
-	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+	fw_domains |= intel_uncore_forcewake_for_reg(uncore,
 						     GEN8_MCR_SELECTOR,
 						     FW_REG_READ | FW_REG_WRITE);
-	spin_lock_irq(&dev_priv->uncore.lock);
-	intel_uncore_forcewake_get__locked(&dev_priv->uncore, fw_domains);
+	spin_lock_irq(&uncore->lock);
+	intel_uncore_forcewake_get__locked(uncore, fw_domains);
-	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
+	mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
 	WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
 		     default_mcr_s_ss_select);
 	mcr &= ~mcr_slice_subslice_mask;
 	mcr |= mcr_slice_subslice_select;
-	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
-	ret = I915_READ_FW(reg);
+	ret = intel_uncore_read_fw(uncore, reg);
 	mcr &= ~mcr_slice_subslice_mask;
 	mcr |= default_mcr_s_ss_select;
-	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
-	intel_uncore_forcewake_put__locked(&dev_priv->uncore, fw_domains);
-	spin_unlock_irq(&dev_priv->uncore.lock);
+	intel_uncore_forcewake_put__locked(uncore, fw_domains);
+	spin_unlock_irq(&uncore->lock);
 	return ret;
 }

View File

@@ -54,7 +54,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
 	BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
 	for (i = 0; i < guc->send_regs.count; i++) {
-		fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+		fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
 							     guc_send_reg(guc, i),
 							     FW_REG_READ | FW_REG_WRITE);
 	}

View File

@@ -9959,6 +9959,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
 u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
 			   const i915_reg_t reg)
 {
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	u64 time_hw, prev_hw, overflow_hw;
 	unsigned int fw_domains;
 	unsigned long flags;
@@ -9980,10 +9981,10 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
 	if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
 		return 0;
-	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
-	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
-	intel_uncore_forcewake_get__locked(&dev_priv->uncore, fw_domains);
+	spin_lock_irqsave(&uncore->lock, flags);
+	intel_uncore_forcewake_get__locked(uncore, fw_domains);
 	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -10002,7 +10003,7 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
 		}
 		overflow_hw = BIT_ULL(32);
-		time_hw = I915_READ_FW(reg);
+		time_hw = intel_uncore_read_fw(uncore, reg);
 	}
@@ -10024,8 +10025,8 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
 	time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
 	dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
-	intel_uncore_forcewake_put__locked(&dev_priv->uncore, fw_domains);
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+	intel_uncore_forcewake_put__locked(uncore, fw_domains);
+	spin_unlock_irqrestore(&uncore->lock, flags);
 	return mul_u64_u32_div(time_hw, mul, div);
 }

View File

@@ -1841,7 +1841,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;
 	unsigned fw =
-		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
 	u32 reg_value;
 	int ret;
@@ -1904,23 +1904,23 @@ out:
 }
 static enum forcewake_domains
-intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_read(struct intel_uncore *uncore,
 				i915_reg_t reg)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
 	u32 offset = i915_mmio_reg_offset(reg);
 	enum forcewake_domains fw_domains;
-	if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
 		fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset);
-	} else if (HAS_FWTABLE(dev_priv)) {
+	} else if (HAS_FWTABLE(i915)) {
 		fw_domains = __fwtable_reg_read_fw_domains(uncore, offset);
-	} else if (INTEL_GEN(dev_priv) >= 6) {
+	} else if (INTEL_GEN(i915) >= 6) {
 		fw_domains = __gen6_reg_read_fw_domains(uncore, offset);
 	} else {
 		/* on devices with FW we expect to hit one of the above cases */
 		if (intel_uncore_has_forcewake(uncore))
-			MISSING_CASE(INTEL_GEN(dev_priv));
+			MISSING_CASE(INTEL_GEN(i915));
 		fw_domains = 0;
 	}
@@ -1931,25 +1931,25 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
 }
 static enum forcewake_domains
-intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_write(struct intel_uncore *uncore,
 				 i915_reg_t reg)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
 	u32 offset = i915_mmio_reg_offset(reg);
 	enum forcewake_domains fw_domains;
-	if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
 		fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset);
-	} else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
+	} else if (HAS_FWTABLE(i915) && !IS_VALLEYVIEW(i915)) {
 		fw_domains = __fwtable_reg_write_fw_domains(uncore, offset);
-	} else if (IS_GEN(dev_priv, 8)) {
+	} else if (IS_GEN(i915, 8)) {
 		fw_domains = __gen8_reg_write_fw_domains(uncore, offset);
-	} else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
+	} else if (IS_GEN_RANGE(i915, 6, 7)) {
 		fw_domains = FORCEWAKE_RENDER;
 	} else {
 		/* on devices with FW we expect to hit one of the above cases */
 		if (intel_uncore_has_forcewake(uncore))
-			MISSING_CASE(INTEL_GEN(dev_priv));
+			MISSING_CASE(INTEL_GEN(i915));
 		fw_domains = 0;
 	}
@@ -1962,7 +1962,7 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
 /**
  * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
  * a register
- * @dev_priv: pointer to struct drm_i915_private
+ * @uncore: pointer to struct intel_uncore
  * @reg: register in question
  * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
  *
@@ -1974,21 +1974,21 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
  * callers to do FIFO management on their own or risk losing writes.
  */
 enum forcewake_domains
-intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
 			       i915_reg_t reg, unsigned int op)
 {
 	enum forcewake_domains fw_domains = 0;
 	WARN_ON(!op);
-	if (!intel_uncore_has_forcewake(&dev_priv->uncore))
+	if (!intel_uncore_has_forcewake(uncore))
 		return 0;
 	if (op & FW_REG_READ)
-		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
+		fw_domains = intel_uncore_forcewake_for_read(uncore, reg);
 	if (op & FW_REG_WRITE)
-		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
+		fw_domains |= intel_uncore_forcewake_for_write(uncore, reg);
 	return fw_domains;
 }
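
As the kerneldoc above notes, intel_uncore_forcewake_for_reg() only reports which domains are needed; the caller still has to take them before doing raw accesses. A minimal sketch of the locked usage pattern, distilled from the intel_rc6_residency_ns() hunk earlier in this commit (uncore, reg, flags and val are illustrative names, not code from any one file):

	/* Ask which domains a raw (_fw) read of 'reg' needs... */
	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);

	/* ...then take them under the uncore lock around the raw access. */
	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	val = intel_uncore_read_fw(uncore, reg);

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, flags);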

View File

@@ -193,7 +193,7 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
 enum forcewake_domains
-intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
 			       i915_reg_t reg, unsigned int op);
 #define FW_REG_READ (1)
 #define FW_REG_WRITE (2)

View File

@@ -905,7 +905,7 @@ wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
 	unsigned int i;
 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
-		fw |= intel_uncore_forcewake_for_reg(dev_priv,
+		fw |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
						     wa->reg,
						     FW_REG_READ |
						     FW_REG_WRITE);

View File

@@ -184,7 +184,7 @@ static int live_forcewake_ops(void *arg)
 		if (!engine->default_state)
 			continue;
-		fw_domains = intel_uncore_forcewake_for_reg(i915, mmio,
+		fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio,
							    FW_REG_READ);
 		if (!fw_domains)
 			continue;