1
0
Fork 0

drm/i915: Record GT workarounds in a list

To enable later verification of GT workaround state at various stages of
driver lifetime, we record the applicable workarounds per platform in a
list, from which they are also applied.

The added data structure is a simple array of register, mask and value
items, which is allocated on demand as workarounds are added to the list.

This is a temporary implementation which later in the series gets fused
with the existing per context workaround list handling. It is separated at
this stage since the following patch fixes a bug which needs to be as easy
to backport as possible.

Also, since in the following patch we will be adding a new class of
workarounds (per engine) which can be applied from interrupt context, we
straight away make the provision for safe read-modify-write cycle.

v2:
 * Change dev_priv to i915 along the init path. (Chris Wilson)
 * API rename. (Chris Wilson)

v3:
 * Remove explicit list size tracking in favour of growing the allocation
   in power of two chunks. (Chris Wilson)

v4:
 Chris Wilson:
 * Change wa_list_finish to early return.
 * Copy workarounds using the compiler for static checking.
 * Do not bother zeroing unused entries.
 * Re-order struct i915_wa_list.

v5:
 * kmalloc_array.
 * Whitespace cleanup.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20181203133319.10174-1-tvrtko.ursulin@linux.intel.com
(cherry picked from commit 25d140faaa)
Fixes: 59b449d5c8 ("drm/i915: Split out functions for different kinds of workarounds")
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
hifive-unleashed-5.1
Tvrtko Ursulin 2018-12-05 11:33:23 +00:00 committed by Joonas Lahtinen
parent 2595646791
commit 009367791f
5 changed files with 352 additions and 160 deletions

View File

@ -1444,6 +1444,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_uncore_sanitize(dev_priv); intel_uncore_sanitize(dev_priv);
intel_gt_init_workarounds(dev_priv);
i915_gem_load_init_fences(dev_priv); i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the /* On the 945G/GM, the chipset reports the MSI capability on the

View File

@ -67,6 +67,7 @@
#include "intel_ringbuffer.h" #include "intel_ringbuffer.h"
#include "intel_uncore.h" #include "intel_uncore.h"
#include "intel_wopcm.h" #include "intel_wopcm.h"
#include "intel_workarounds.h"
#include "intel_uc.h" #include "intel_uc.h"
#include "i915_gem.h" #include "i915_gem.h"
@ -1805,6 +1806,7 @@ struct drm_i915_private {
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
struct i915_workarounds workarounds; struct i915_workarounds workarounds;
struct i915_wa_list gt_wa_list;
struct i915_frontbuffer_tracking fb_tracking; struct i915_frontbuffer_tracking fb_tracking;

View File

@ -5305,7 +5305,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
} }
} }
intel_gt_workarounds_apply(dev_priv); intel_gt_apply_workarounds(dev_priv);
i915_gem_init_swizzling(dev_priv); i915_gem_init_swizzling(dev_priv);
@ -5677,6 +5677,8 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
i915_gem_contexts_fini(dev_priv); i915_gem_contexts_fini(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex); mutex_unlock(&dev_priv->drm.struct_mutex);
intel_wa_list_free(&dev_priv->gt_wa_list);
intel_cleanup_gt_powersave(dev_priv); intel_cleanup_gt_powersave(dev_priv);
intel_uc_fini_misc(dev_priv); intel_uc_fini_misc(dev_priv);

View File

@ -48,6 +48,20 @@
* - Public functions to init or apply the given workaround type. * - Public functions to init or apply the given workaround type.
*/ */
static void wa_init_start(struct i915_wa_list *wal, const char *name)
{
wal->name = name;
}
static void wa_init_finish(struct i915_wa_list *wal)
{
if (!wal->count)
return;
DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
wal->count, wal->name);
}
static void wa_add(struct drm_i915_private *i915, static void wa_add(struct drm_i915_private *i915,
i915_reg_t reg, const u32 mask, const u32 val) i915_reg_t reg, const u32 mask, const u32 val)
{ {
@ -580,160 +594,239 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
return 0; return 0;
} }
static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv) static void
wal_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{ {
const unsigned int grow = 1 << 4;
GEM_BUG_ON(!is_power_of_2(grow));
if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
struct i915_wa *list;
list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
GFP_KERNEL);
if (!list) {
DRM_ERROR("No space for workaround init!\n");
return;
}
if (wal->list)
memcpy(list, wal->list, sizeof(*wa) * wal->count);
wal->list = list;
}
wal->list[wal->count++] = *wa;
} }
static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv) static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{ {
struct i915_wa wa = {
.reg = reg,
.mask = val,
.val = _MASKED_BIT_ENABLE(val)
};
wal_add(wal, &wa);
} }
static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv) static void
wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
u32 val)
{ {
struct i915_wa wa = {
.reg = reg,
.mask = mask,
.val = val
};
wal_add(wal, &wa);
}
static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
wa_write_masked_or(wal, reg, ~0, val);
}
static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
wa_write_masked_or(wal, reg, val, val);
}
static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
{
struct i915_wa_list *wal = &i915->gt_wa_list;
/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */ /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, wa_masked_en(wal,
_MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE)); GEN9_CSFE_CHICKEN1_RCS,
GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | wa_write_or(wal,
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); BDW_SCRATCH1,
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
/* WaDisableKillLogic:bxt,skl,kbl */ /* WaDisableKillLogic:bxt,skl,kbl */
if (!IS_COFFEELAKE(dev_priv)) if (!IS_COFFEELAKE(i915))
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | wa_write_or(wal,
ECOCHK_DIS_TLB); GAM_ECOCHK,
ECOCHK_DIS_TLB);
if (HAS_LLC(dev_priv)) { if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
* *
* Must match Display Engine. See * Must match Display Engine. See
* WaCompressedResourceDisplayNewHashMode. * WaCompressedResourceDisplayNewHashMode.
*/ */
I915_WRITE(MMCD_MISC_CTRL, wa_write_or(wal,
I915_READ(MMCD_MISC_CTRL) | MMCD_MISC_CTRL,
MMCD_PCLA | MMCD_PCLA | MMCD_HOTSPOT_EN);
MMCD_HOTSPOT_EN);
} }
/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */ /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | wa_write_or(wal,
BDW_DISABLE_HDC_INVALIDATION); GAM_ECOCHK,
BDW_DISABLE_HDC_INVALIDATION);
/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */ /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
if (IS_GEN9_LP(dev_priv)) { if (IS_GEN9_LP(i915))
u32 val = I915_READ(GEN8_L3SQCREG1); wa_write_masked_or(wal,
GEN8_L3SQCREG1,
val &= ~L3_PRIO_CREDITS_MASK; L3_PRIO_CREDITS_MASK,
val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2); L3_GENERAL_PRIO_CREDITS(62) |
I915_WRITE(GEN8_L3SQCREG1, val); L3_HIGH_PRIO_CREDITS(2));
}
/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */ /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
I915_WRITE(GEN8_L3SQCREG4, wa_write_or(wal,
I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES); GEN8_L3SQCREG4,
GEN8_LQSC_FLUSH_COHERENT_LINES);
/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, wa_masked_en(wal,
_MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); GEN7_FF_SLICE_CS_CHICKEN1,
GEN9_FFSC_PERCTX_PREEMPT_CTRL);
} }
static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv) static void skl_gt_workarounds_init(struct drm_i915_private *i915)
{ {
gen9_gt_workarounds_apply(dev_priv); struct i915_wa_list *wal = &i915->gt_wa_list;
gen9_gt_workarounds_init(i915);
/* WaEnableGapsTsvCreditFix:skl */ /* WaEnableGapsTsvCreditFix:skl */
I915_WRITE(GEN8_GARBCNTL, wa_write_or(wal,
I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE); GEN8_GARBCNTL,
GEN9_GAPS_TSV_CREDIT_DISABLE);
/* WaDisableGafsUnitClkGating:skl */ /* WaDisableGafsUnitClkGating:skl */
I915_WRITE(GEN7_UCGCTL4, wa_write_or(wal,
I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); GEN7_UCGCTL4,
GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */ /* WaInPlaceDecompressionHang:skl */
if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER)) if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, wa_write_or(wal,
I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
} }
static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv) static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
{ {
gen9_gt_workarounds_apply(dev_priv); struct i915_wa_list *wal = &i915->gt_wa_list;
gen9_gt_workarounds_init(i915);
/* WaDisablePooledEuLoadBalancingFix:bxt */ /* WaDisablePooledEuLoadBalancingFix:bxt */
I915_WRITE(FF_SLICE_CS_CHICKEN2, wa_masked_en(wal,
_MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE)); FF_SLICE_CS_CHICKEN2,
GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
/* WaInPlaceDecompressionHang:bxt */ /* WaInPlaceDecompressionHang:bxt */
I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, wa_write_or(wal,
I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
} }
static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv) static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
{ {
gen9_gt_workarounds_apply(dev_priv); struct i915_wa_list *wal = &i915->gt_wa_list;
gen9_gt_workarounds_init(i915);
/* WaEnableGapsTsvCreditFix:kbl */ /* WaEnableGapsTsvCreditFix:kbl */
I915_WRITE(GEN8_GARBCNTL, wa_write_or(wal,
I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE); GEN8_GARBCNTL,
GEN9_GAPS_TSV_CREDIT_DISABLE);
/* WaDisableDynamicCreditSharing:kbl */ /* WaDisableDynamicCreditSharing:kbl */
if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
I915_WRITE(GAMT_CHKN_BIT_REG, wa_write_or(wal,
I915_READ(GAMT_CHKN_BIT_REG) | GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
/* WaDisableGafsUnitClkGating:kbl */ /* WaDisableGafsUnitClkGating:kbl */
I915_WRITE(GEN7_UCGCTL4, wa_write_or(wal,
I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); GEN7_UCGCTL4,
GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:kbl */ /* WaInPlaceDecompressionHang:kbl */
I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, wa_write_or(wal,
I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaKBLVECSSemaphoreWaitPoll:kbl */ /* WaKBLVECSSemaphoreWaitPoll:kbl */
if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) { if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
unsigned int tmp; unsigned int tmp;
for_each_engine(engine, dev_priv, tmp) { for_each_engine(engine, i915, tmp) {
if (engine->id == RCS) if (engine->id == RCS)
continue; continue;
I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1); wa_write(wal,
RING_SEMA_WAIT_POLL(engine->mmio_base),
1);
} }
} }
} }
static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv) static void glk_gt_workarounds_init(struct drm_i915_private *i915)
{ {
gen9_gt_workarounds_apply(dev_priv); gen9_gt_workarounds_init(i915);
} }
static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv) static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
{ {
gen9_gt_workarounds_apply(dev_priv); struct i915_wa_list *wal = &i915->gt_wa_list;
gen9_gt_workarounds_init(i915);
/* WaEnableGapsTsvCreditFix:cfl */ /* WaEnableGapsTsvCreditFix:cfl */
I915_WRITE(GEN8_GARBCNTL, wa_write_or(wal,
I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE); GEN8_GARBCNTL,
GEN9_GAPS_TSV_CREDIT_DISABLE);
/* WaDisableGafsUnitClkGating:cfl */ /* WaDisableGafsUnitClkGating:cfl */
I915_WRITE(GEN7_UCGCTL4, wa_write_or(wal,
I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); GEN7_UCGCTL4,
GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:cfl */ /* WaInPlaceDecompressionHang:cfl */
I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, wa_write_or(wal,
I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
} }
static void wa_init_mcr(struct drm_i915_private *dev_priv) static void wa_init_mcr(struct drm_i915_private *dev_priv)
{ {
const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu); const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
u32 mcr; struct i915_wa_list *wal = &dev_priv->gt_wa_list;
u32 mcr_slice_subslice_mask; u32 mcr_slice_subslice_mask;
/* /*
@ -770,8 +863,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
WARN_ON((enabled_mask & disabled_mask) != enabled_mask); WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
} }
mcr = I915_READ(GEN8_MCR_SELECTOR);
if (INTEL_GEN(dev_priv) >= 11) if (INTEL_GEN(dev_priv) >= 11)
mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK | mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
GEN11_MCR_SUBSLICE_MASK; GEN11_MCR_SUBSLICE_MASK;
@ -789,148 +880,223 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
* occasions, such as INSTDONE, where this value is dependent * occasions, such as INSTDONE, where this value is dependent
* on s/ss combo, the read should be done with read_subslice_reg. * on s/ss combo, the read should be done with read_subslice_reg.
*/ */
mcr &= ~mcr_slice_subslice_mask; wa_write_masked_or(wal,
mcr |= intel_calculate_mcr_s_ss_select(dev_priv); GEN8_MCR_SELECTOR,
I915_WRITE(GEN8_MCR_SELECTOR, mcr); mcr_slice_subslice_mask,
intel_calculate_mcr_s_ss_select(dev_priv));
} }
static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv) static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
{ {
wa_init_mcr(dev_priv); struct i915_wa_list *wal = &i915->gt_wa_list;
wa_init_mcr(i915);
/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */ /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0)) if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
I915_WRITE(GAMT_CHKN_BIT_REG, wa_write_or(wal,
I915_READ(GAMT_CHKN_BIT_REG) | GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT); GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
/* WaInPlaceDecompressionHang:cnl */ /* WaInPlaceDecompressionHang:cnl */
I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, wa_write_or(wal,
I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaEnablePreemptionGranularityControlByUMD:cnl */ /* WaEnablePreemptionGranularityControlByUMD:cnl */
I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, wa_masked_en(wal,
_MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); GEN7_FF_SLICE_CS_CHICKEN1,
GEN9_FFSC_PERCTX_PREEMPT_CTRL);
} }
static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv) static void icl_gt_workarounds_init(struct drm_i915_private *i915)
{ {
wa_init_mcr(dev_priv); struct i915_wa_list *wal = &i915->gt_wa_list;
wa_init_mcr(i915);
/* This is not an Wa. Enable for better image quality */ /* This is not an Wa. Enable for better image quality */
I915_WRITE(_3D_CHICKEN3, wa_masked_en(wal,
_MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE)); _3D_CHICKEN3,
_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
/* WaInPlaceDecompressionHang:icl */ /* WaInPlaceDecompressionHang:icl */
I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | wa_write_or(wal,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaPipelineFlushCoherentLines:icl */ /* WaPipelineFlushCoherentLines:icl */
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | wa_write_or(wal,
GEN8_LQSC_FLUSH_COHERENT_LINES); GEN8_L3SQCREG4,
GEN8_LQSC_FLUSH_COHERENT_LINES);
/* Wa_1405543622:icl /* Wa_1405543622:icl
* Formerly known as WaGAPZPriorityScheme * Formerly known as WaGAPZPriorityScheme
*/ */
I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) | wa_write_or(wal,
GEN11_ARBITRATION_PRIO_ORDER_MASK); GEN8_GARBCNTL,
GEN11_ARBITRATION_PRIO_ORDER_MASK);
/* Wa_1604223664:icl /* Wa_1604223664:icl
* Formerly known as WaL3BankAddressHashing * Formerly known as WaL3BankAddressHashing
*/ */
I915_WRITE(GEN8_GARBCNTL, wa_write_masked_or(wal,
(I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) | GEN8_GARBCNTL,
GEN11_HASH_CTRL_EXCL_BIT0); GEN11_HASH_CTRL_EXCL_MASK,
I915_WRITE(GEN11_GLBLINVL, GEN11_HASH_CTRL_EXCL_BIT0);
(I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) | wa_write_masked_or(wal,
GEN11_BANK_HASH_ADDR_EXCL_BIT0); GEN11_GLBLINVL,
GEN11_BANK_HASH_ADDR_EXCL_MASK,
GEN11_BANK_HASH_ADDR_EXCL_BIT0);
/* WaModifyGamTlbPartitioning:icl */ /* WaModifyGamTlbPartitioning:icl */
I915_WRITE(GEN11_GACB_PERF_CTRL, wa_write_masked_or(wal,
(I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) | GEN11_GACB_PERF_CTRL,
GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4); GEN11_HASH_CTRL_MASK,
GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
/* Wa_1405733216:icl /* Wa_1405733216:icl
* Formerly known as WaDisableCleanEvicts * Formerly known as WaDisableCleanEvicts
*/ */
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | wa_write_or(wal,
GEN11_LQSC_CLEAN_EVICT_DISABLE); GEN8_L3SQCREG4,
GEN11_LQSC_CLEAN_EVICT_DISABLE);
/* Wa_1405766107:icl /* Wa_1405766107:icl
* Formerly known as WaCL2SFHalfMaxAlloc * Formerly known as WaCL2SFHalfMaxAlloc
*/ */
I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) | wa_write_or(wal,
GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC | GEN11_LSN_UNSLCVC,
GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC); GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
/* Wa_220166154:icl /* Wa_220166154:icl
* Formerly known as WaDisCtxReload * Formerly known as WaDisCtxReload
*/ */
I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) | wa_write_or(wal,
GAMW_ECO_DEV_CTX_RELOAD_DISABLE); GEN8_GAMW_ECO_DEV_RW_IA,
GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
/* Wa_1405779004:icl (pre-prod) */ /* Wa_1405779004:icl (pre-prod) */
if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0)) if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, wa_write_or(wal,
I915_READ(SLICE_UNIT_LEVEL_CLKGATE) | SLICE_UNIT_LEVEL_CLKGATE,
MSCUNIT_CLKGATE_DIS); MSCUNIT_CLKGATE_DIS);
/* Wa_1406680159:icl */ /* Wa_1406680159:icl */
I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, wa_write_or(wal,
I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) | SUBSLICE_UNIT_LEVEL_CLKGATE,
GWUNIT_CLKGATE_DIS); GWUNIT_CLKGATE_DIS);
/* Wa_1604302699:icl */ /* Wa_1604302699:icl */
I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER, wa_write_or(wal,
I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) | GEN10_L3_CHICKEN_MODE_REGISTER,
GEN11_I2M_WRITE_DISABLE); GEN11_I2M_WRITE_DISABLE);
/* Wa_1406838659:icl (pre-prod) */ /* Wa_1406838659:icl (pre-prod) */
if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0)) if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
I915_WRITE(INF_UNIT_LEVEL_CLKGATE, wa_write_or(wal,
I915_READ(INF_UNIT_LEVEL_CLKGATE) | INF_UNIT_LEVEL_CLKGATE,
CGPSF_CLKGATE_DIS); CGPSF_CLKGATE_DIS);
/* WaForwardProgressSoftReset:icl */ /* WaForwardProgressSoftReset:icl */
I915_WRITE(GEN10_SCRATCH_LNCF2, wa_write_or(wal,
I915_READ(GEN10_SCRATCH_LNCF2) | GEN10_SCRATCH_LNCF2,
PMFLUSHDONE_LNICRSDROP | PMFLUSHDONE_LNICRSDROP |
PMFLUSH_GAPL3UNBLOCK | PMFLUSH_GAPL3UNBLOCK |
PMFLUSHDONE_LNEBLK); PMFLUSHDONE_LNEBLK);
/* Wa_1406463099:icl /* Wa_1406463099:icl
* Formerly known as WaGamTlbPendError * Formerly known as WaGamTlbPendError
*/ */
I915_WRITE(GAMT_CHKN_BIT_REG, wa_write_or(wal,
I915_READ(GAMT_CHKN_BIT_REG) | GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_L3_COH_PIPE); GAMT_CHKN_DISABLE_L3_COH_PIPE);
} }
void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv) void intel_gt_init_workarounds(struct drm_i915_private *i915)
{ {
if (INTEL_GEN(dev_priv) < 8) struct i915_wa_list *wal = &i915->gt_wa_list;
wa_init_start(wal, "GT");
if (INTEL_GEN(i915) < 8)
return; return;
else if (IS_BROADWELL(dev_priv)) else if (IS_BROADWELL(i915))
bdw_gt_workarounds_apply(dev_priv); return;
else if (IS_CHERRYVIEW(dev_priv)) else if (IS_CHERRYVIEW(i915))
chv_gt_workarounds_apply(dev_priv); return;
else if (IS_SKYLAKE(dev_priv)) else if (IS_SKYLAKE(i915))
skl_gt_workarounds_apply(dev_priv); skl_gt_workarounds_init(i915);
else if (IS_BROXTON(dev_priv)) else if (IS_BROXTON(i915))
bxt_gt_workarounds_apply(dev_priv); bxt_gt_workarounds_init(i915);
else if (IS_KABYLAKE(dev_priv)) else if (IS_KABYLAKE(i915))
kbl_gt_workarounds_apply(dev_priv); kbl_gt_workarounds_init(i915);
else if (IS_GEMINILAKE(dev_priv)) else if (IS_GEMINILAKE(i915))
glk_gt_workarounds_apply(dev_priv); glk_gt_workarounds_init(i915);
else if (IS_COFFEELAKE(dev_priv)) else if (IS_COFFEELAKE(i915))
cfl_gt_workarounds_apply(dev_priv); cfl_gt_workarounds_init(i915);
else if (IS_CANNONLAKE(dev_priv)) else if (IS_CANNONLAKE(i915))
cnl_gt_workarounds_apply(dev_priv); cnl_gt_workarounds_init(i915);
else if (IS_ICELAKE(dev_priv)) else if (IS_ICELAKE(i915))
icl_gt_workarounds_apply(dev_priv); icl_gt_workarounds_init(i915);
else else
MISSING_CASE(INTEL_GEN(dev_priv)); MISSING_CASE(INTEL_GEN(i915));
wa_init_finish(wal);
}
static enum forcewake_domains
wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
const struct i915_wa_list *wal)
{
enum forcewake_domains fw = 0;
struct i915_wa *wa;
unsigned int i;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
fw |= intel_uncore_forcewake_for_reg(dev_priv,
wa->reg,
FW_REG_READ |
FW_REG_WRITE);
return fw;
}
static void
wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
{
enum forcewake_domains fw;
unsigned long flags;
struct i915_wa *wa;
unsigned int i;
if (!wal->count)
return;
fw = wal_get_fw_for_rmw(dev_priv, wal);
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
intel_uncore_forcewake_get__locked(dev_priv, fw);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
u32 val = I915_READ_FW(wa->reg);
val &= ~wa->mask;
val |= wa->val;
I915_WRITE_FW(wa->reg, val);
}
intel_uncore_forcewake_put__locked(dev_priv, fw);
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
}
void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
{
wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
} }
struct whitelist { struct whitelist {

View File

@ -7,10 +7,31 @@
#ifndef _I915_WORKAROUNDS_H_ #ifndef _I915_WORKAROUNDS_H_
#define _I915_WORKAROUNDS_H_ #define _I915_WORKAROUNDS_H_
#include <linux/slab.h>
struct i915_wa {
i915_reg_t reg;
u32 mask;
u32 val;
};
struct i915_wa_list {
const char *name;
struct i915_wa *list;
unsigned int count;
};
static inline void intel_wa_list_free(struct i915_wa_list *wal)
{
kfree(wal->list);
memset(wal, 0, sizeof(*wal));
}
int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv); int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
int intel_ctx_workarounds_emit(struct i915_request *rq); int intel_ctx_workarounds_emit(struct i915_request *rq);
void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv); void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine); void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);