Merge tag 'drm-intel-next-2016-11-08' of git://anongit.freedesktop.org/git/drm-intel into drm-next

- GPU idling rework for suspend/resume (Imre)
- vlv mappable scanout fix
- speed up probing in resume (Lyude)
- dp audio workarounds for gen9 (Dhinakaran)
- more conversion to using dev_priv internally (Ville)
- more gen9+ wm fixes and cleanups (Maarten)
- shrinker cleanup & fixes (Chris)
- reorg plane init code (Ville)
- implement support for multiple timelines (prep work for scheduler)
  from Chris and all
- untangle dev->struct_mutex locking as prep for multiple timelines
  (Chris)
- refactor bxt phy code and collect it all in intel_dpio_phy.c (Ander)
- another gvt update with bugfixes all over from Zhenyu
- piles of lspcon fixes from Imre
- 90/270 rotation fixes (Ville)
- guc log buffer support (Akash+Sagar)
- fbc fixes from Paulo
- untangle rpm vs. tiling-fences/mmaps (Chris)
- fix atomic commit to wait on the right fences (Daniel Stone)

* tag 'drm-intel-next-2016-11-08' of git://anongit.freedesktop.org/git/drm-intel: (181 commits)
  drm/i915: Update DRIVER_DATE to 20161108
  drm/i915: Mark CPU cache as dirty when used for rendering
  drm/i915: Add assert for no pending GPU requests during suspend/resume in LR mode
  drm/i915: Make sure engines are idle during GPU idling in LR mode
  drm/i915: Avoid early GPU idling due to race with new request
  drm/i915: Avoid early GPU idling due to already pending idle work
  drm/i915: Limit Valleyview and earlier to only using mappable scanout
  drm/i915: Round tile chunks up for constructing partial VMAs
  drm/i915: Remove the vma from the object list upon close
  drm/i915: Reinit polling before hpd when resuming
  drm/i915: Remove redundant reprobe in i915_drm_resume
  drm/i915/dp: Extend BDW DP audio workaround to GEN9 platforms
  drm/i915/dp: BDW cdclk fix for DP audio
  drm/i915: Fix pages pin counting around swizzle quirk
  drm/i915: Fix test on inputs for vma_compare()
  drm/i915/guc: Cache the client mapping
  drm/i915: Tidy slab cache allocations
  drm/i915: Introduce HAS_64BIT_RELOC
  drm/i915: Show the execlist queue in debugfs/i915_engine_info
  drm/i915: Unify global_list into global_link
  ...
Committed by Dave Airlie on 2016-11-11 09:25:32 +10:00
commit db8feb6979
79 changed files with 6772 additions and 4804 deletions


@ -189,7 +189,7 @@ Display Refresh Rate Switching (DRRS)
DPIO
----
.. kernel-doc:: drivers/gpu/drm/i915/i915_reg.h
.. kernel-doc:: drivers/gpu/drm/i915/intel_dpio_phy.c
:doc: DPIO
CSR firmware support for DMC


@ -4060,7 +4060,6 @@ INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Daniel Vetter <daniel.vetter@intel.com>
M: Jani Nikula <jani.nikula@linux.intel.com>
L: intel-gfx@lists.freedesktop.org
L: dri-devel@lists.freedesktop.org
W: https://01.org/linuxgraphics/
Q: http://patchwork.freedesktop.org/project/intel-gfx/
T: git git://anongit.freedesktop.org/drm-intel


@ -11,6 +11,7 @@ config DRM_I915
select DRM_KMS_HELPER
select DRM_PANEL
select DRM_MIPI_DSI
select RELAY
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI
@ -24,16 +25,17 @@ config DRM_I915
including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
Core i5, Core i7 as well as Atom CPUs with integrated graphics.
If M is selected, the module will be called i915. AGP support
is required for this driver to work. This driver is used by
the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
replaces the older i830 module that supported a subset of the
hardware in older X.org releases.
This driver is used by the Intel driver in X.org 6.8 and
XFree86 4.4 and above. It replaces the older i830 module that
supported a subset of the hardware in older X.org releases.
Note that the older i810/i815 chipsets require the use of the
i810 driver instead, and the Atom z5xx series has an entirely
different implementation.
If "M" is selected, the module will be called i915.
config DRM_I915_PRELIMINARY_HW_SUPPORT
bool "Enable preliminary support for prerelease Intel hardware by default"
depends on DRM_I915
@ -85,6 +87,7 @@ config DRM_I915_USERPTR
config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
depends on DRM_I915
depends on 64BIT
default n
help
Choose this option if you want to enable Intel GVT-g graphics


@ -35,16 +35,19 @@ i915-y += i915_cmd_parser.o \
i915_gem_execbuffer.o \
i915_gem_fence.o \
i915_gem_gtt.o \
i915_gem_internal.o \
i915_gem.o \
i915_gem_render_state.o \
i915_gem_request.o \
i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_gem_timeline.o \
i915_gem_userptr.o \
i915_trace_points.o \
intel_breadcrumbs.o \
intel_engine_cs.o \
intel_hangcheck.o \
intel_lrc.o \
intel_mocs.o \
intel_ringbuffer.o \


@ -1145,7 +1145,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
info->event = PRIMARY_B_FLIP_DONE;
break;
case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
info->pipe = PIPE_B;
info->pipe = PIPE_C;
info->event = PRIMARY_C_FLIP_DONE;
break;
default:
@ -1201,20 +1201,19 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
struct intel_vgpu *vgpu = s->vgpu;
#define write_bits(reg, e, s, v) do { \
vgpu_vreg(vgpu, reg) &= ~GENMASK(e, s); \
vgpu_vreg(vgpu, reg) |= (v << s); \
} while (0)
write_bits(info->surf_reg, 31, 12, info->surf_val);
if (IS_SKYLAKE(dev_priv))
write_bits(info->stride_reg, 9, 0, info->stride_val);
else
write_bits(info->stride_reg, 15, 6, info->stride_val);
write_bits(info->ctrl_reg, IS_SKYLAKE(dev_priv) ? 12 : 10,
10, info->tile_val);
#undef write_bits
set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
if (IS_SKYLAKE(dev_priv)) {
set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
info->tile_val << 10);
} else {
set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
info->stride_val << 6);
set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
info->tile_val << 10);
}
vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, info->event);
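
For readers unfamiliar with the helpers adopted above: set_mask_bits() does a read-modify-write of only the bits selected by a GENMASK() mask (the kernel version loops on cmpxchg so the update is atomic). A minimal, non-atomic userspace sketch of the same semantics, with made-up register values:

#include <stdint.h>
#include <stdio.h>

/* Userspace approximation of GENMASK(h, l) for 32-bit registers. */
#define GENMASK32(h, l)  ((~0u << (l)) & (~0u >> (31 - (h))))

/* Non-atomic stand-in for the kernel's set_mask_bits(): clear the masked
 * field, then OR in the new value (already shifted into position). */
static void set_mask_bits32(uint32_t *reg, uint32_t mask, uint32_t bits)
{
	*reg = (*reg & ~mask) | (bits & mask);
}

int main(void)
{
	uint32_t ctrl = 0xdeadbeef;
	uint32_t tile_val = 0x2;

	/* Equivalent of: set_mask_bits(&ctrl, GENMASK(12, 10), tile_val << 10) */
	set_mask_bits32(&ctrl, GENMASK32(12, 10), tile_val << 10);
	printf("ctrl = 0x%08x\n", ctrl);
	return 0;
}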


@ -276,7 +276,7 @@ static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
pte = readq(addr);
#else
pte = ioread32(addr);
pte |= ioread32(addr + 4) << 32;
pte |= (u64)ioread32(addr + 4) << 32;
#endif
return pte;
}
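
The (u64) cast added above is the whole fix: without it, the value returned by ioread32() is shifted by 32 in 32-bit arithmetic, so the upper dword is lost (and the shift itself is undefined behaviour). A standalone illustration of the corrected pattern:

#include <stdint.h>
#include <stdio.h>

/* Combine two 32-bit halves into a 64-bit PTE-like value. The (uint64_t)
 * cast mirrors the (u64) cast added to read_pte64(); dropping it would
 * shift a 32-bit integer by 32 and discard the high half. */
static uint64_t combine_halves(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main(void)
{
	printf("0x%016llx\n",
	       (unsigned long long)combine_halves(0x89abcdefu, 0x01234567u));
	return 0;
}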
@ -1944,7 +1944,7 @@ static int create_scratch_page(struct intel_vgpu *vgpu)
mfn = intel_gvt_hypervisor_virt_to_mfn(vaddr);
if (mfn == INTEL_GVT_INVALID_ADDR) {
gvt_err("fail to translate vaddr:0x%llx\n", (u64)vaddr);
gvt_err("fail to translate vaddr: 0x%p\n", vaddr);
__free_page(gtt->scratch_page);
gtt->scratch_page = NULL;
return -ENXIO;


@ -65,6 +65,8 @@ struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops = {
*/
int intel_gvt_init_host(void)
{
int ret;
if (intel_gvt_host.initialized)
return 0;
@ -90,7 +92,8 @@ int intel_gvt_init_host(void)
return -EINVAL;
/* Try to detect if we're running in host instead of VM. */
if (!intel_gvt_hypervisor_detect_host())
ret = intel_gvt_hypervisor_detect_host();
if (ret)
return -ENODEV;
gvt_dbg_core("Running with hypervisor %s in host mode\n",
@ -103,19 +106,20 @@ int intel_gvt_init_host(void)
static void init_device_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
info->max_support_vgpus = 8;
info->cfg_space_size = 256;
info->mmio_size = 2 * 1024 * 1024;
info->mmio_bar = 0;
info->msi_cap_offset = IS_SKYLAKE(gvt->dev_priv) ? 0xac : 0x90;
info->gtt_start_offset = 8 * 1024 * 1024;
info->gtt_entry_size = 8;
info->gtt_entry_size_shift = 3;
info->gmadr_bytes_in_cmd = 8;
info->max_surface_size = 36 * 1024 * 1024;
}
info->msi_cap_offset = pdev->msi_cap;
}
static int gvt_service_thread(void *data)
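
The hunk above also stops hard-coding the MSI capability offset (0xac for Skylake, 0x90 for Broadwell) and instead uses the offset the PCI core discovered during enumeration. A hedged sketch of the equivalent lookup; the helper name is made up, but pdev->msi_cap and pci_find_capability() are the real interfaces:

#include <linux/pci.h>

/* Sketch only: return the MSI capability offset for a device. The PCI core
 * caches it in pdev->msi_cap at probe time; pci_find_capability() walks the
 * capability list and yields the same value. */
static u8 gvt_msi_cap_offset(struct pci_dev *pdev)
{
	if (pdev->msi_cap)
		return pdev->msi_cap;

	return pci_find_capability(pdev, PCI_CAP_ID_MSI);
}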


@ -382,6 +382,8 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
int setup_vgpu_mmio(struct intel_vgpu *vgpu);
void populate_pvinfo_page(struct intel_vgpu *vgpu);
#include "mpt.h"


@ -239,7 +239,11 @@ static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
vgpu->resetting = true;
intel_vgpu_stop_schedule(vgpu);
if (scheduler->current_vgpu == vgpu) {
/*
* The current_vgpu will set to NULL after stopping the
* scheduler when the reset is triggered by current vgpu.
*/
if (scheduler->current_vgpu == NULL) {
mutex_unlock(&vgpu->gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&vgpu->gvt->lock);
@ -247,6 +251,16 @@ static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
intel_vgpu_reset_execlist(vgpu, bitmap);
/* full GPU reset */
if (bitmap == 0xff) {
mutex_unlock(&vgpu->gvt->lock);
intel_vgpu_clean_gtt(vgpu);
mutex_lock(&vgpu->gvt->lock);
setup_vgpu_mmio(vgpu);
populate_pvinfo_page(vgpu);
intel_vgpu_init_gtt(vgpu);
}
vgpu->resetting = false;
return 0;
@ -258,6 +272,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
u32 data;
u64 bitmap = 0;
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
if (data & GEN6_GRDOM_FULL) {
@ -1305,7 +1320,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
struct intel_vgpu_execlist *execlist;
u32 data = *(u32 *)p_data;
int ret;
int ret = 0;
if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
return -EINVAL;
@ -1313,12 +1328,15 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
execlist = &vgpu->execlist[ring_id];
execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
if (execlist->elsp_dwords.index == 3)
if (execlist->elsp_dwords.index == 3) {
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
if(ret)
gvt_err("fail submit workload on ring %d\n", ring_id);
}
++execlist->elsp_dwords.index;
execlist->elsp_dwords.index &= 0x3;
return 0;
return ret;
}
static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,


@ -163,7 +163,7 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
*/
void intel_gvt_clean_opregion(struct intel_gvt *gvt)
{
iounmap(gvt->opregion.opregion_va);
memunmap(gvt->opregion.opregion_va);
gvt->opregion.opregion_va = NULL;
}
@ -181,8 +181,8 @@ int intel_gvt_init_opregion(struct intel_gvt *gvt)
pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
&gvt->opregion.opregion_pa);
gvt->opregion.opregion_va = acpi_os_ioremap(gvt->opregion.opregion_pa,
INTEL_GVT_OPREGION_SIZE);
gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
if (!gvt->opregion.opregion_va) {
gvt_err("fail to map host opregion\n");
return -EFAULT;
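
The switch from acpi_os_ioremap()/iounmap() to memremap()/memunmap() reflects that the OpRegion lives in ordinary write-back system RAM rather than MMIO space, so it should not carry an __iomem mapping. A hedged sketch of the mapping pattern; the helper names and error handling are illustrative, not taken from the patch:

#include <linux/io.h>
#include <linux/err.h>

/* Sketch only: map a physical OpRegion address as normal cacheable memory.
 * memremap() returns a plain kernel pointer, so it can be dereferenced
 * directly and is released with memunmap(). */
static void *map_opregion(resource_size_t opregion_pa, size_t size)
{
	void *va = memremap(opregion_pa, size, MEMREMAP_WB);

	return va ? va : ERR_PTR(-EFAULT);
}

static void unmap_opregion(void *va)
{
	if (!IS_ERR_OR_NULL(va))
		memunmap(va);
}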


@ -118,6 +118,7 @@ static u32 gen9_render_mocs_L3[32];
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
enum forcewake_domains fw;
i915_reg_t reg;
u32 regs[] = {
[RCS] = 0x4260,
@ -135,11 +136,25 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
reg = _MMIO(regs[ring_id]);
I915_WRITE(reg, 0x1);
/* WaForceWakeRenderDuringMmioTLBInvalidate:skl
* we need to put a forcewake when invalidating RCS TLB caches,
* otherwise device can go to RC6 state and interrupt invalidation
* process
*/
fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ | FW_REG_WRITE);
if (ring_id == RCS && IS_SKYLAKE(dev_priv))
fw |= FORCEWAKE_RENDER;
if (wait_for_atomic((I915_READ(reg) == 0), 50))
intel_uncore_forcewake_get(dev_priv, fw);
I915_WRITE_FW(reg, 0x1);
if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
intel_uncore_forcewake_put(dev_priv, fw);
gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}
@ -162,6 +177,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
if (!IS_SKYLAKE(dev_priv))
return;
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
gen9_render_mocs[ring_id][i] = I915_READ(offset);
I915_WRITE(offset, vgpu_vreg(vgpu, offset));
@ -199,6 +215,7 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
if (!IS_SKYLAKE(dev_priv))
return;
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
vgpu_vreg(vgpu, offset) = I915_READ(offset);
I915_WRITE(offset, gen9_render_mocs[ring_id][i]);


@ -400,21 +400,27 @@ static int workload_thread(void *priv)
int ring_id = p->ring_id;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
long lret;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
kfree(p);
gvt_dbg_core("workload thread for ring %d started\n", ring_id);
while (!kthread_should_stop()) {
ret = wait_event_interruptible(scheduler->waitq[ring_id],
kthread_should_stop() ||
(workload = pick_next_workload(gvt, ring_id)));
add_wait_queue(&scheduler->waitq[ring_id], &wait);
do {
workload = pick_next_workload(gvt, ring_id);
if (workload)
break;
wait_woken(&wait, TASK_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
} while (!kthread_should_stop());
remove_wait_queue(&scheduler->waitq[ring_id], &wait);
WARN_ON_ONCE(ret);
if (kthread_should_stop())
if (!workload)
break;
mutex_lock(&scheduler_mutex);
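
The workload thread above moves from wait_event_interruptible() to the wait_woken() idiom, which closes the race between checking for new work and going to sleep without needing a single condition expression. A condensed, self-contained sketch of the pattern; the queue and the pick_work() helper are placeholders, not GVT code:

#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Placeholder work source; in the patch this role is played by
 * pick_next_workload(). */
static void *pick_work(void)
{
	return NULL;
}

static int worker_fn(void *data)
{
	wait_queue_head_t *waitq = data;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!kthread_should_stop()) {
		void *work = NULL;

		add_wait_queue(waitq, &wait);
		do {
			work = pick_work();
			if (work)
				break;
			/* Sleep until the producer wakes us or we are told to stop. */
			wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(waitq, &wait);

		if (!work)
			break;

		/* ... dispatch the work item here ... */
	}
	return 0;
}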
@ -444,10 +450,12 @@ static int workload_thread(void *priv)
gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload);
workload->status = i915_wait_request(workload->req,
0, NULL, NULL);
if (workload->status != 0)
lret = i915_wait_request(workload->req,
0, MAX_SCHEDULE_TIMEOUT);
if (lret < 0) {
workload->status = lret;
gvt_err("fail to wait workload, skip\n");
}
complete:
gvt_dbg_sched("will complete workload %p\n, status: %d\n",


@ -41,7 +41,7 @@ static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}
static int setup_vgpu_mmio(struct intel_vgpu *vgpu)
int setup_vgpu_mmio(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
@ -103,7 +103,7 @@ static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
}
}
static void populate_pvinfo_page(struct intel_vgpu *vgpu)
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
/* setup the ballooning information */
vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;


@ -1290,7 +1290,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
}
if (ret == 0 && needs_clflush_after)
drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len);
drm_clflush_virt_range(shadow_batch_obj->mm.mapping, batch_len);
i915_gem_object_unpin_map(shadow_batch_obj);
return ret;


@ -107,12 +107,12 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
static char get_global_flag(struct drm_i915_gem_object *obj)
{
return obj->fault_mappable ? 'g' : ' ';
return !list_empty(&obj->userfault_link) ? 'g' : ' ';
}
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
return obj->mapping ? 'M' : ' ';
return obj->mm.mapping ? 'M' : ' ';
}
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
@ -136,11 +136,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct i915_vma *vma;
unsigned int frontbuffer_bits;
int pin_count = 0;
enum intel_engine_id id;
lockdep_assert_held(&obj->base.dev->struct_mutex);
seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
&obj->base,
get_active_flag(obj),
get_pin_flag(obj),
@ -149,17 +148,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
get_pin_mapped_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain);
for_each_engine(engine, dev_priv, id)
seq_printf(m, "%x ",
i915_gem_active_get_seqno(&obj->last_read[id],
&obj->base.dev->struct_mutex));
seq_printf(m, "] %x %s%s%s",
i915_gem_active_get_seqno(&obj->last_write,
&obj->base.dev->struct_mutex),
obj->base.write_domain,
i915_cache_level_str(dev_priv, obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
obj->mm.dirty ? " dirty" : "",
obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
@ -187,8 +179,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
engine = i915_gem_active_get_engine(&obj->last_write,
&dev_priv->drm.struct_mutex);
engine = i915_gem_object_last_write_engine(obj);
if (engine)
seq_printf(m, " (%s)", engine->name);
@ -226,7 +217,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
return ret;
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (obj->stolen == NULL)
continue;
@ -236,7 +227,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
count++;
}
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
if (obj->stolen == NULL)
continue;
@ -399,16 +390,16 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
size = count = 0;
mapped_size = mapped_count = 0;
purgeable_size = purgeable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
size += obj->base.size;
++count;
if (obj->madv == I915_MADV_DONTNEED) {
if (obj->mm.madv == I915_MADV_DONTNEED) {
purgeable_size += obj->base.size;
++purgeable_count;
}
if (obj->mapping) {
if (obj->mm.mapping) {
mapped_count++;
mapped_size += obj->base.size;
}
@ -416,7 +407,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
size = count = dpy_size = dpy_count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
size += obj->base.size;
++count;
@ -425,12 +416,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
++dpy_count;
}
if (obj->madv == I915_MADV_DONTNEED) {
if (obj->mm.madv == I915_MADV_DONTNEED) {
purgeable_size += obj->base.size;
++purgeable_count;
}
if (obj->mapping) {
if (obj->mm.mapping) {
mapped_count++;
mapped_size += obj->base.size;
}
@ -502,7 +493,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
return ret;
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (show_pin_display_only && !obj->pin_display)
continue;
@ -561,7 +552,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
engine->name,
i915_gem_request_get_seqno(work->flip_queued_req),
dev_priv->next_seqno,
atomic_read(&dev_priv->gt.global_timeline.next_seqno),
intel_engine_get_seqno(engine),
i915_gem_request_completed(work->flip_queued_req));
} else
@ -640,17 +631,10 @@ static void print_request(struct seq_file *m,
struct drm_i915_gem_request *rq,
const char *prefix)
{
struct pid *pid = rq->ctx->pid;
struct task_struct *task;
rcu_read_lock();
task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
seq_printf(m, "%s%x [%x:%x] @ %d: %s [%d]\n", prefix,
rq->fence.seqno, rq->ctx->hw_id, rq->fence.seqno,
seq_printf(m, "%s%x [%x:%x] @ %d: %s\n", prefix,
rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
task ? task->comm : "<unknown>",
task ? task->pid : -1);
rcu_read_unlock();
rq->timeline->common->name);
}
static int i915_gem_request_info(struct seq_file *m, void *data)
@ -671,13 +655,13 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
int count;
count = 0;
list_for_each_entry(req, &engine->request_list, link)
list_for_each_entry(req, &engine->timeline->requests, link)
count++;
if (count == 0)
continue;
seq_printf(m, "%s requests: %d\n", engine->name, count);
list_for_each_entry(req, &engine->request_list, link)
list_for_each_entry(req, &engine->timeline->requests, link)
print_request(m, req, " ");
any++;
@ -699,14 +683,14 @@ static void i915_ring_seqno_info(struct seq_file *m,
seq_printf(m, "Current sequence (%s): %x\n",
engine->name, intel_engine_get_seqno(engine));
spin_lock(&b->lock);
spin_lock_irq(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = container_of(rb, typeof(*w), node);
seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
}
spin_unlock(&b->lock);
spin_unlock_irq(&b->lock);
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
@ -743,17 +727,32 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(VLV_IIR_RW));
seq_printf(m, "Display IMR:\t%08x\n",
I915_READ(VLV_IMR));
for_each_pipe(dev_priv, pipe)
for_each_pipe(dev_priv, pipe) {
enum intel_display_power_domain power_domain;
power_domain = POWER_DOMAIN_PIPE(pipe);
if (!intel_display_power_get_if_enabled(dev_priv,
power_domain)) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
continue;
}
seq_printf(m, "Pipe %c stat:\t%08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
intel_display_power_put(dev_priv, power_domain);
}
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
seq_printf(m, "Port hotplug:\t%08x\n",
I915_READ(PORT_HOTPLUG_EN));
seq_printf(m, "DPFLIPSTAT:\t%08x\n",
I915_READ(VLV_DPFLIPSTAT));
seq_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
for (i = 0; i < 4; i++) {
seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
@ -1046,15 +1045,8 @@ static int
i915_next_seqno_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
int ret;
ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
if (ret)
return ret;
*val = dev_priv->next_seqno;
mutex_unlock(&dev_priv->drm.struct_mutex);
*val = atomic_read(&dev_priv->gt.global_timeline.next_seqno);
return 0;
}
@ -1069,7 +1061,7 @@ i915_next_seqno_set(void *data, u64 val)
if (ret)
return ret;
ret = i915_gem_set_seqno(dev, val);
ret = i915_gem_set_global_seqno(dev, val);
mutex_unlock(&dev->struct_mutex);
return ret;
@ -1356,21 +1348,20 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "%s:\n", engine->name);
seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
engine->hangcheck.seqno,
seqno[id],
engine->last_submitted_seqno);
engine->hangcheck.seqno, seqno[id],
intel_engine_last_submit(engine));
seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
yesno(intel_engine_has_waiter(engine)),
yesno(test_bit(engine->id,
&dev_priv->gpu_error.missed_irq_rings)));
spin_lock(&b->lock);
spin_lock_irq(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = container_of(rb, typeof(*w), node);
seq_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
}
spin_unlock(&b->lock);
spin_unlock_irq(&b->lock);
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
@ -1396,14 +1387,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
u32 rgvmodectl, rstdbyctl;
u16 crstandvid;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
intel_runtime_pm_get(dev_priv);
rgvmodectl = I915_READ(MEMMODECTL);
@ -1411,7 +1397,6 @@ static int ironlake_drpc_info(struct seq_file *m)
crstandvid = I915_READ16(CRSTANDVID);
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
seq_printf(m, "Boost freq: %d\n",
@ -1674,11 +1659,13 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
seq_printf(m, "FBC disabled: %s\n",
dev_priv->fbc.no_fbc_reason);
if (intel_fbc_is_active(dev_priv) &&
INTEL_GEN(dev_priv) >= 7)
if (intel_fbc_is_active(dev_priv) && INTEL_GEN(dev_priv) >= 7) {
uint32_t mask = INTEL_GEN(dev_priv) >= 8 ?
BDW_FBC_COMPRESSION_MASK :
IVB_FBC_COMPRESSION_MASK;
seq_printf(m, "Compressing: %s\n",
yesno(I915_READ(FBC_STATUS2) &
FBC_COMPRESSION_MASK));
yesno(I915_READ(FBC_STATUS2) & mask));
}
mutex_unlock(&dev_priv->fbc.lock);
intel_runtime_pm_put(dev_priv);
@ -1757,6 +1744,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
bool sr_enabled = false;
intel_runtime_pm_get(dev_priv);
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
if (HAS_PCH_SPLIT(dev_priv))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
@ -1770,6 +1758,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_runtime_pm_put(dev_priv);
seq_printf(m, "self-refresh: %s\n",
@ -2015,7 +2004,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
seq_printf(m, "\tBound in GGTT at 0x%08x\n",
i915_ggtt_offset(vma));
if (i915_gem_object_get_pages(vma->obj)) {
if (i915_gem_object_pin_pages(vma->obj)) {
seq_puts(m, "\tFailed to get pages for context object\n\n");
return;
}
@ -2034,6 +2023,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
kunmap_atomic(reg_state);
}
i915_gem_object_unpin_pages(vma->obj);
seq_putc(m, '\n');
}
@ -2091,12 +2081,7 @@ static const char *swizzle_string(unsigned swizzle)
static int i915_swizzle_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
intel_runtime_pm_get(dev_priv);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
@ -2136,7 +2121,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_puts(m, "L-shaped memory detected\n");
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
}
@ -2292,8 +2276,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
struct drm_file *file;
seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
seq_printf(m, "GPU busy? %s [%x]\n",
yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
seq_printf(m, "GPU busy? %s [%d requests]\n",
yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
seq_printf(m, "Frequency requested %d\n",
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
@ -2328,7 +2312,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
if (INTEL_GEN(dev_priv) >= 6 &&
dev_priv->rps.enabled &&
dev_priv->gt.active_engines) {
dev_priv->gt.active_requests) {
u32 rpup, rpupei;
u32 rpdown, rpdownei;
@ -2409,6 +2393,32 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
return 0;
}
static void i915_guc_log_info(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
seq_puts(m, "\nGuC logging stats:\n");
seq_printf(m, "\tISR: flush count %10u, overflow count %10u\n",
guc->log.flush_count[GUC_ISR_LOG_BUFFER],
guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);
seq_printf(m, "\tDPC: flush count %10u, overflow count %10u\n",
guc->log.flush_count[GUC_DPC_LOG_BUFFER],
guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);
seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);
seq_printf(m, "\tTotal flush interrupt count: %u\n",
guc->log.flush_interrupt_count);
seq_printf(m, "\tCapture miss count: %u\n",
guc->log.capture_miss_count);
}
static void i915_guc_client_info(struct seq_file *m,
struct drm_i915_private *dev_priv,
struct i915_guc_client *client)
@ -2482,6 +2492,8 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
i915_guc_client_info(m, dev_priv, &client);
i915_guc_log_info(m, dev_priv);
/* Add more as required ... */
return 0;
@ -2493,10 +2505,10 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
struct drm_i915_gem_object *obj;
int i = 0, pg;
if (!dev_priv->guc.log_vma)
if (!dev_priv->guc.log.vma)
return 0;
obj = dev_priv->guc.log_vma->obj;
obj = dev_priv->guc.log.vma->obj;
for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) {
u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg));
@ -2513,6 +2525,44 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
return 0;
}
static int i915_guc_log_control_get(void *data, u64 *val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = to_i915(dev);
if (!dev_priv->guc.log.vma)
return -EINVAL;
*val = i915.guc_log_level;
return 0;
}
static int i915_guc_log_control_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
if (!dev_priv->guc.log.vma)
return -EINVAL;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
intel_runtime_pm_get(dev_priv);
ret = i915_guc_log_control(dev_priv, val);
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
i915_guc_log_control_get, i915_guc_log_control_set,
"%lld\n");
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
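
The new i915_guc_log_control debugfs file (registered further down in the i915_debugfs_files table) lets userspace adjust GuC logging at runtime; the value read back mirrors the i915.guc_log_level module parameter. A hypothetical userspace sketch; the debugfs mount point and DRM minor number are assumptions:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/dri/0/i915_guc_log_control", "w");

	if (!f)
		return 1;
	/* Request a higher GuC log level (value semantics follow
	 * i915.guc_log_level). */
	fprintf(f, "2\n");
	return fclose(f) ? 1 : 0;
}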
@ -2542,11 +2592,22 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
else {
for_each_pipe(dev_priv, pipe) {
enum transcoder cpu_transcoder =
intel_pipe_to_cpu_transcoder(dev_priv, pipe);
enum intel_display_power_domain power_domain;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv,
power_domain))
continue;
stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
VLV_EDP_PSR_CURR_STATE_MASK;
if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
(stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
enabled = true;
intel_display_power_put(dev_priv, power_domain);
}
}
@ -3094,6 +3155,8 @@ static int i915_engine_info(struct seq_file *m, void *unused)
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_runtime_pm_get(dev_priv);
for_each_engine(engine, dev_priv, id) {
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct drm_i915_gem_request *rq;
@ -3103,7 +3166,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
seq_printf(m, "%s\n", engine->name);
seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [score %d]\n",
intel_engine_get_seqno(engine),
engine->last_submitted_seqno,
intel_engine_last_submit(engine),
engine->hangcheck.seqno,
engine->hangcheck.score);
@ -3111,14 +3174,14 @@ static int i915_engine_info(struct seq_file *m, void *unused)
seq_printf(m, "\tRequests:\n");
rq = list_first_entry(&engine->request_list,
struct drm_i915_gem_request, link);
if (&rq->link != &engine->request_list)
rq = list_first_entry(&engine->timeline->requests,
struct drm_i915_gem_request, link);
if (&rq->link != &engine->timeline->requests)
print_request(m, rq, "\t\tfirst ");
rq = list_last_entry(&engine->request_list,
struct drm_i915_gem_request, link);
if (&rq->link != &engine->request_list)
rq = list_last_entry(&engine->timeline->requests,
struct drm_i915_gem_request, link);
if (&rq->link != &engine->timeline->requests)
print_request(m, rq, "\t\tlast ");
rq = i915_gem_find_active_request(engine);
@ -3192,6 +3255,12 @@ static int i915_engine_info(struct seq_file *m, void *unused)
else
seq_printf(m, "\t\tELSP[1] idle\n");
rcu_read_unlock();
spin_lock_irq(&engine->execlist_lock);
list_for_each_entry(rq, &engine->execlist_queue, execlist_link) {
print_request(m, rq, "\t\tQ ");
}
spin_unlock_irq(&engine->execlist_lock);
} else if (INTEL_GEN(dev_priv) > 6) {
seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
I915_READ(RING_PP_DIR_BASE(engine)));
@ -3201,18 +3270,20 @@ static int i915_engine_info(struct seq_file *m, void *unused)
I915_READ(RING_PP_DIR_DCLV(engine)));
}
spin_lock(&b->lock);
spin_lock_irq(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = container_of(rb, typeof(*w), node);
seq_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
}
spin_unlock(&b->lock);
spin_unlock_irq(&b->lock);
seq_puts(m, "\n");
}
intel_runtime_pm_put(dev_priv);
return 0;
}
@ -3274,15 +3345,6 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
seq_putc(m, '\n');
}
seq_puts(m, "\nSync seqno:\n");
for_each_engine(engine, dev_priv, id) {
for (j = 0; j < num_rings; j++)
seq_printf(m, " 0x%08x ",
engine->semaphore.sync_seqno[j]);
seq_putc(m, '\n');
}
seq_putc(m, '\n');
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
@ -3375,7 +3437,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
for_each_pipe(dev_priv, pipe) {
seq_printf(m, "Pipe %c\n", pipe_name(pipe));
for_each_plane(dev_priv, pipe, plane) {
for_each_universal_plane(dev_priv, pipe, plane) {
entry = &ddb->plane[pipe][plane];
seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
entry->start, entry->end,
@ -4009,8 +4071,7 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
bool enable)
{
struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
struct intel_crtc_state *pipe_config;
struct drm_atomic_state *state;
int ret = 0;
@ -4076,10 +4137,8 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source source)
{
struct drm_device *dev = &dev_priv->drm;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc =
to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
enum intel_display_power_domain power_domain;
u32 val = 0; /* shut up gcc */
int ret;
@ -4150,15 +4209,15 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
/* real source -> none transition */
if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
struct intel_pipe_crc_entry *entries;
struct intel_crtc *crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
pipe);
DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
pipe_name(pipe));
drm_modeset_lock(&crtc->base.mutex, NULL);
if (crtc->base.state->active)
intel_wait_for_vblank(dev, pipe);
intel_wait_for_vblank(dev_priv, pipe);
drm_modeset_unlock(&crtc->base.mutex);
spin_lock_irq(&pipe_crc->lock);
@ -4798,13 +4857,9 @@ i915_wedged_set(void *data, u64 val)
if (i915_reset_in_progress(&dev_priv->gpu_error))
return -EAGAIN;
intel_runtime_pm_get(dev_priv);
i915_handle_error(dev_priv, val,
"Manually setting wedged to %llu", val);
intel_runtime_pm_put(dev_priv);
return 0;
}
@ -4872,10 +4927,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
DROP_BOUND | \
DROP_RETIRE | \
DROP_ACTIVE)
#define DROP_FREED 0x10
#define DROP_ALL (DROP_UNBOUND | \
DROP_BOUND | \
DROP_RETIRE | \
DROP_ACTIVE | \
DROP_FREED)
static int
i915_drop_caches_get(void *data, u64 *val)
{
@ -4919,6 +4976,11 @@ i915_drop_caches_set(void *data, u64 val)
unlock:
mutex_unlock(&dev->struct_mutex);
if (val & DROP_FREED) {
synchronize_rcu();
flush_work(&dev_priv->mm.free_work);
}
return ret;
}
@ -5039,22 +5101,16 @@ static int
i915_cache_sharing_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
struct drm_device *dev = &dev_priv->drm;
u32 snpcr;
int ret;
if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
return -ENODEV;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
intel_runtime_pm_get(dev_priv);
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@ -5414,7 +5470,8 @@ static const struct i915_debugfs_files {
{"i915_fbc_false_color", &i915_fbc_fc_fops},
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
{"i915_dp_test_active", &i915_displayport_test_active_fops}
{"i915_dp_test_active", &i915_displayport_test_active_fops},
{"i915_guc_log_control", &i915_guc_log_control_fops}
};
void intel_display_crc_init(struct drm_i915_private *dev_priv)


@ -537,14 +537,17 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
.can_switch = i915_switcheroo_can_switch,
};
static void i915_gem_fini(struct drm_device *dev)
static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_engines(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_cleanup_engines(&dev_priv->drm);
i915_gem_context_fini(&dev_priv->drm);
mutex_unlock(&dev_priv->drm.struct_mutex);
WARN_ON(!list_empty(&to_i915(dev)->context_list));
rcu_barrier();
flush_work(&dev_priv->mm.free_work);
WARN_ON(!list_empty(&dev_priv->context_list));
}
static int i915_load_modeset_init(struct drm_device *dev)
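
The rcu_barrier() and flush_work(&dev_priv->mm.free_work) added to i915_gem_fini() drain the new deferred-free machinery: objects are now queued on a lockless list and reaped by a worker instead of being freed synchronously (see the free_list/free_work fields and the rcu/freed union added to the structures later in this diff). A generic, hedged sketch of that pattern, not the i915 code itself:

#include <linux/llist.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_obj {
	union {
		struct rcu_head rcu;		/* for RCU-deferred paths */
		struct llist_node freed;	/* link on the free list */
	};
};

static LLIST_HEAD(demo_free_list);

static void demo_free_worker(struct work_struct *work)
{
	struct llist_node *freed = llist_del_all(&demo_free_list);
	struct demo_obj *obj, *next;

	llist_for_each_entry_safe(obj, next, freed, freed)
		kfree(obj);
}
static DECLARE_WORK(demo_free_work, demo_free_worker);

/* Called when the last reference is dropped: queue the object and kick the
 * worker; the actual kfree() happens later, off the hot path. */
static void demo_obj_release(struct demo_obj *obj)
{
	if (llist_add(&obj->freed, &demo_free_list))
		schedule_work(&demo_free_work);
}

/* Teardown mirrors the i915_gem_fini() change: wait out RCU readers, then
 * make sure the worker has finished before the structures go away. */
static void demo_fini(void)
{
	rcu_barrier();
	flush_work(&demo_free_work);
}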
@ -592,7 +595,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
ret = intel_modeset_init(dev);
if (ret)
goto cleanup_irq;
intel_guc_init(dev);
@ -619,7 +624,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
cleanup_gem:
if (i915_gem_suspend(dev))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
i915_gem_fini(dev);
i915_gem_fini(dev_priv);
cleanup_irq:
intel_guc_fini(dev);
drm_irq_uninstall(dev);
@ -825,10 +830,13 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_init_dpio(dev_priv);
intel_power_domains_init(dev_priv);
intel_irq_init(dev_priv);
intel_hangcheck_init(dev_priv);
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
i915_gem_load_init(&dev_priv->drm);
ret = i915_gem_load_init(&dev_priv->drm);
if (ret < 0)
goto err_gvt;
intel_display_crc_init(dev_priv);
@ -838,6 +846,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
return 0;
err_gvt:
intel_gvt_cleanup(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
return ret;
@ -972,7 +982,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_device *dev = &dev_priv->drm;
int ret;
if (i915_inject_load_failure())
@ -1030,7 +1039,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* behaviour if any general state is accessed within a page above 4GB,
* which also needs to be handled carefully.
*/
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
@ -1111,6 +1120,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
/* Reveal our presence to userspace */
if (drm_dev_register(dev, 0) == 0) {
i915_debugfs_register(dev_priv);
i915_guc_register(dev_priv);
i915_setup_sysfs(dev_priv);
} else
DRM_ERROR("Failed to register driver for userspace access!\n");
@ -1149,6 +1159,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_opregion_unregister(dev_priv);
i915_teardown_sysfs(dev_priv);
i915_guc_unregister(dev_priv);
i915_debugfs_unregister(dev_priv);
drm_dev_unregister(&dev_priv->drm);
@ -1303,7 +1314,7 @@ void i915_driver_unload(struct drm_device *dev)
drain_workqueue(dev_priv->wq);
intel_guc_fini(dev);
i915_gem_fini(dev);
i915_gem_fini(dev_priv);
intel_fbc_cleanup_cfb(dev_priv);
intel_power_domains_fini(dev_priv);
@ -1425,7 +1436,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_encoders(dev_priv);
intel_suspend_hw(dev);
intel_suspend_hw(dev_priv);
i915_gem_suspend_gtt_mappings(dev);
@ -1589,6 +1600,8 @@ static int i915_drm_resume(struct drm_device *dev)
intel_display_resume(dev);
drm_kms_helper_poll_enable(dev);
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
@ -1596,8 +1609,6 @@ static int i915_drm_resume(struct drm_device *dev)
* notifications.
* */
intel_hpd_init(dev_priv);
/* Config may have changed between suspend and resume */
drm_helper_hpd_irq_event(dev);
intel_opregion_register(dev_priv);
@ -1610,7 +1621,6 @@ static int i915_drm_resume(struct drm_device *dev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
intel_autoenable_gt_powersave(dev_priv);
drm_kms_helper_poll_enable(dev);
enable_rpm_wakeref_asserts(dev_priv);
@ -2254,7 +2264,6 @@ err1:
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume)
{
struct drm_device *dev = &dev_priv->drm;
int err;
int ret;
@ -2278,10 +2287,8 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
vlv_check_no_gt_access(dev_priv);
if (rpm_resume) {
intel_init_clock_gating(dev);
i915_gem_restore_fences(dev);
}
if (rpm_resume)
intel_init_clock_gating(dev_priv);
return ret;
}
@ -2301,32 +2308,13 @@ static int intel_runtime_suspend(struct device *kdev)
DRM_DEBUG_KMS("Suspending device\n");
/*
* We could deadlock here in case another thread holding struct_mutex
* calls RPM suspend concurrently, since the RPM suspend will wait
* first for this RPM suspend to finish. In this case the concurrent
* RPM resume will be followed by its RPM suspend counterpart. Still
* for consistency return -EAGAIN, which will reschedule this suspend.
*/
if (!mutex_trylock(&dev->struct_mutex)) {
DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
/*
* Bump the expiration timestamp, otherwise the suspend won't
* be rescheduled.
*/
pm_runtime_mark_last_busy(kdev);
return -EAGAIN;
}
disable_rpm_wakeref_asserts(dev_priv);
/*
* We are safe here against re-faults, since the fault handler takes
* an RPM reference.
*/
i915_gem_release_all_mmaps(dev_priv);
mutex_unlock(&dev->struct_mutex);
i915_gem_runtime_suspend(dev_priv);
intel_guc_suspend(dev);
@ -2591,7 +2579,7 @@ static struct drm_driver driver = {
.set_busid = drm_pci_set_busid,
.gem_close_object = i915_gem_close_object,
.gem_free_object = i915_gem_free_object,
.gem_free_object_unlocked = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,


@ -41,6 +41,7 @@
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
@ -62,6 +63,7 @@
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "intel_gvt.h"
@ -70,8 +72,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20161024"
#define DRIVER_TIMESTAMP 1477290335
#define DRIVER_DATE "20161108"
#define DRIVER_TIMESTAMP 1478587895
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@ -183,7 +185,7 @@ enum plane {
};
#define plane_name(p) ((p) + 'A')
#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
enum port {
PORT_NONE = -1,
@ -312,7 +314,7 @@ struct i915_hotplug {
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
for_each_if ((__mask) & (1 << (__p)))
#define for_each_plane(__dev_priv, __pipe, __p) \
#define for_each_universal_plane(__dev_priv, __pipe, __p) \
for ((__p) = 0; \
(__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
(__p)++)
@ -492,8 +494,8 @@ struct intel_limit;
struct dpll;
struct drm_i915_display_funcs {
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
int (*get_display_clock_speed)(struct drm_i915_private *dev_priv);
int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane);
int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
int (*compute_intermediate_wm)(struct drm_device *dev,
struct intel_crtc *intel_crtc,
@ -501,7 +503,7 @@ struct drm_i915_display_funcs {
void (*initial_watermarks)(struct intel_crtc_state *cstate);
void (*optimize_watermarks)(struct intel_crtc_state *cstate);
int (*compute_global_watermarks)(struct drm_atomic_state *state);
void (*update_wm)(struct drm_crtc *crtc);
void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
@ -523,7 +525,7 @@ struct drm_i915_display_funcs {
const struct drm_display_mode *adjusted_mode);
void (*audio_codec_disable)(struct intel_encoder *encoder);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
void (*init_clock_gating)(struct drm_i915_private *dev_priv);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
@ -668,6 +670,7 @@ struct intel_csr {
func(is_kabylake); \
func(is_preliminary); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
func(has_csr); \
func(has_ddi); \
func(has_dp_mst); \
@ -746,6 +749,8 @@ struct intel_display_error_state;
struct drm_i915_error_state {
struct kref ref;
struct timeval time;
struct timeval boottime;
struct timeval uptime;
struct drm_i915_private *i915;
@ -778,6 +783,7 @@ struct drm_i915_error_state {
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
struct drm_i915_error_object *semaphore;
struct drm_i915_error_object *guc_log;
struct drm_i915_error_engine {
int engine_id;
@ -797,7 +803,6 @@ struct drm_i915_error_state {
u32 cpu_ring_tail;
u32 last_seqno;
u32 semaphore_seqno[I915_NUM_ENGINES - 1];
/* Register state */
u32 start;
@ -933,6 +938,7 @@ struct i915_gem_context {
struct drm_i915_file_private *file_priv;
struct i915_hw_ppgtt *ppgtt;
struct pid *pid;
const char *name;
struct i915_ctx_hang_stats hang_stats;
@ -1319,6 +1325,12 @@ struct i915_power_well {
/* cached hw enabled state */
bool hw_enabled;
unsigned long domains;
/* unique identifier for this power well */
unsigned long id;
/*
* Arbitraty data associated with this power well. Platform and power
* well specific.
*/
unsigned long data;
const struct i915_power_well_ops *ops;
};
@ -1356,11 +1368,22 @@ struct i915_gem_mm {
struct list_head bound_list;
/**
* List of objects which are not bound to the GTT (thus
* are idle and not used by the GPU) but still have
* (presumably uncached) pages still attached.
* are idle and not used by the GPU). These objects may or may
* not actually have any pages attached.
*/
struct list_head unbound_list;
/** List of all objects in gtt_space, currently mmaped by userspace.
* All objects within this list must also be on bound_list.
*/
struct list_head userfault_list;
/**
* List of objects which are pending destruction.
*/
struct llist_head free_list;
struct work_struct free_work;
/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */
@ -1409,6 +1432,9 @@ struct i915_error_state_file_priv {
struct drm_i915_error_state *error;
};
#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@ -1682,7 +1708,6 @@ struct skl_wm_level {
*/
struct i915_runtime_pm {
atomic_t wakeref_count;
atomic_t atomic_seq;
bool suspended;
bool irqs_enabled;
};
@ -1807,7 +1832,6 @@ struct drm_i915_private {
struct i915_gem_context *kernel_context;
struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct i915_vma *semaphore;
u32 next_seqno;
struct drm_dma_handle *status_page_dmah;
struct resource mch_res;
@ -1832,8 +1856,10 @@ struct drm_i915_private {
u32 de_irq_mask[I915_MAX_PIPES];
};
u32 gt_irq_mask;
u32 pm_irq_mask;
u32 pm_imr;
u32 pm_ier;
u32 pm_rps_events;
u32 pm_guc_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct i915_hotplug hotplug;
@ -1910,8 +1936,8 @@ struct drm_i915_private {
/* Kernel Modesetting */
struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
wait_queue_head_t pending_flip_queue;
#ifdef CONFIG_DEBUG_FS
@ -2065,6 +2091,10 @@ struct drm_i915_private {
void (*resume)(struct drm_i915_private *);
void (*cleanup_engine)(struct intel_engine_cs *engine);
struct list_head timelines;
struct i915_gem_timeline global_timeline;
u32 active_requests;
/**
* Is the GPU currently considered idle, or busy executing
* userspace requests? Whilst idle, we allow runtime power
@ -2072,7 +2102,6 @@ struct drm_i915_private {
* In order to reduce the effect on performance, there
* is a slight delay before we do so.
*/
unsigned int active_engines;
bool awake;
/**
@ -2092,6 +2121,8 @@ struct drm_i915_private {
* off the idle_work.
*/
struct delayed_work idle_work;
ktime_t last_init_time;
} gt;
/* perform PHY state sanity checks? */
@ -2151,6 +2182,7 @@ enum hdmi_force_audio {
struct drm_i915_gem_object_ops {
unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
#define I915_GEM_OBJECT_IS_SHRINKABLE 0x2
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@ -2165,8 +2197,8 @@ struct drm_i915_gem_object_ops {
* being released or under memory pressure (where we attempt to
* reap pages for the shrinker).
*/
int (*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *);
struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
int (*dmabuf_export)(struct drm_i915_gem_object *);
void (*release)(struct drm_i915_gem_object *);
@ -2200,10 +2232,20 @@ struct drm_i915_gem_object {
/** List of VMAs backed by this object */
struct list_head vma_list;
struct rb_root vma_tree;
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
struct list_head global_list;
struct list_head global_link;
union {
struct rcu_head rcu;
struct llist_node freed;
};
/**
* Whether the object is currently in the GGTT mmap.
*/
struct list_head userfault_link;
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
@ -2211,33 +2253,12 @@ struct drm_i915_gem_object {
struct list_head batch_pool_link;
unsigned long flags;
/**
* This is set if the object is on the active lists (has pending
* rendering and so a non-zero seqno), and is not set if it is on
* inactive (ready to be unbound) list.
*/
#define I915_BO_ACTIVE_SHIFT 0
#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
#define __I915_BO_ACTIVE(bo) \
((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
/**
* This is set if the object has been written to since last bound
* to the GTT
* Have we taken a reference for the object for incomplete GPU
* activity?
*/
unsigned int dirty:1;
/**
* Advice: are the backing pages purgeable?
*/
unsigned int madv:2;
/**
* Whether the current gtt mapping needs to be mappable (and isn't just
* mappable by accident). Track pin and fault separate for a more
* accurate mappable working set.
*/
unsigned int fault_mappable:1;
#define I915_BO_ACTIVE_REF 0
/*
* Is the object to be mapped as read-only to the GPU
@ -2258,15 +2279,41 @@ struct drm_i915_gem_object {
/** Count of VMA actually bound by this object */
unsigned int bind_count;
unsigned int active_count;
unsigned int pin_display;
struct sg_table *pages;
int pages_pin_count;
struct get_page {
struct scatterlist *sg;
int last;
} get_page;
void *mapping;
struct {
struct mutex lock; /* protects the pages and their use */
atomic_t pages_pin_count;
struct sg_table *pages;
void *mapping;
struct i915_gem_object_page_iter {
struct scatterlist *sg_pos;
unsigned int sg_idx; /* in pages, but 32bit eek! */
struct radix_tree_root radix;
struct mutex lock; /* protects this cache */
} get_page;
/**
* Advice: are the backing pages purgeable?
*/
unsigned int madv:2;
/**
* This is set if the object has been written to since the
* pages were last acquired.
*/
bool dirty:1;
/**
* This is set if the object has been pinned due to unknown
* swizzling.
*/
bool quirked:1;
} mm;
/** Breadcrumb of last rendering to the buffer.
* There can only be one writer, but we allow for multiple readers.
@ -2278,8 +2325,7 @@ struct drm_i915_gem_object {
* read request. This allows for the CPU to read from an active
* buffer by only waiting for the write to complete.
*/
struct i915_gem_active last_read[I915_NUM_ENGINES];
struct i915_gem_active last_write;
struct reservation_object *resv;
/** References from framebuffers, locks out tiling changes. */
unsigned long framebuffer_references;
@ -2290,8 +2336,6 @@ struct drm_i915_gem_object {
struct i915_gem_userptr {
uintptr_t ptr;
unsigned read_only :1;
unsigned workers :4;
#define I915_GEM_USERPTR_MAX_WORKERS 15
struct i915_mm_struct *mm;
struct i915_mmu_object *mmu_object;
@ -2300,6 +2344,8 @@ struct drm_i915_gem_object {
/** for phys allocated objects */
struct drm_dma_handle *phys_handle;
struct reservation_object __builtin_resv;
};
static inline struct drm_i915_gem_object *
@ -2311,10 +2357,38 @@ to_intel_bo(struct drm_gem_object *gem)
return container_of(gem, struct drm_i915_gem_object, base);
}
/**
* i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
* @filp: DRM file private date
* @handle: userspace handle
*
* Returns:
*
* A pointer to the object named by the handle if such exists on @filp, NULL
* otherwise. This object is only valid whilst under the RCU read lock, and
* note carefully the object may be in the process of being destroyed.
*/
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
return idr_find(&file->object_idr, handle);
}
static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
return to_intel_bo(drm_gem_object_lookup(file, handle));
struct drm_i915_gem_object *obj;
rcu_read_lock();
obj = i915_gem_object_lookup_rcu(file, handle);
if (obj && !kref_get_unless_zero(&obj->base.refcount))
obj = NULL;
rcu_read_unlock();
return obj;
}
__deprecated
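
A hypothetical caller of the new RCU lookup (illustrative only, not part of the patch): the pointer returned by i915_gem_object_lookup_rcu() is only stable inside the RCU read-side section, so anything longer-lived must take a reference with kref_get_unless_zero(), exactly as i915_gem_object_lookup() does above.

/* Peek at an object's size without taking a reference. Nothing from the
 * object may be used after rcu_read_unlock(), since it may be freed at any
 * point once the read-side section ends. */
static inline u64 peek_object_size(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;
	u64 size = 0;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj)
		size = obj->base.size;
	rcu_read_unlock();

	return size;
}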
@ -2336,59 +2410,61 @@ __attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
drm_gem_object_unreference(&obj->base);
__drm_gem_object_unreference(&obj->base);
}
__deprecated
extern void drm_gem_object_unreference(struct drm_gem_object *);
__attribute__((nonnull))
static inline void
i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
{
drm_gem_object_unreference_unlocked(&obj->base);
}
__deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
static inline bool
i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
{
return atomic_read(&obj->base.refcount.refcount) == 0;
}
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}
static inline unsigned long
i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}
static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
return i915_gem_object_get_active(obj);
}
static inline void
i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
{
obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
}
static inline void
i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
{
obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
return obj->active_count;
}
static inline bool
i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
int engine)
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}
static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}
static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}
void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{
@ -2407,6 +2483,23 @@ i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
return obj->tiling_and_stride & STRIDE_MASK;
}
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
struct intel_engine_cs *engine = NULL;
struct dma_fence *fence;
rcu_read_lock();
fence = reservation_object_get_excl_rcu(obj->resv);
rcu_read_unlock();
if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
engine = to_request(fence)->engine;
dma_fence_put(fence);
return engine;
}
static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
{
i915_gem_object_get(vma->obj);
@ -2415,7 +2508,6 @@ static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
static inline void i915_vma_put(struct i915_vma *vma)
{
lockdep_assert_held(&vma->vm->dev->struct_mutex);
i915_gem_object_put(vma->obj);
}
@ -2445,6 +2537,14 @@ static __always_inline struct sgt_iter {
return s;
}
static inline struct scatterlist *____sg_next(struct scatterlist *sg)
{
++sg;
if (unlikely(sg_is_chain(sg)))
sg = sg_chain_ptr(sg);
return sg;
}
/**
* __sg_next - return the next scatterlist entry in a list
* @sg: The current sg entry
@ -2459,9 +2559,7 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
return sg_is_last(sg) ? NULL :
likely(!sg_is_chain(++sg)) ? sg :
sg_chain_ptr(sg);
return sg_is_last(sg) ? NULL : ____sg_next(sg);
}
/**
@ -2633,20 +2731,20 @@ struct drm_i915_cmd_table {
#define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577)
#define IS_845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I85X(dev_priv) ((dev_priv)->info.is_i85x)
#define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915G(dev_priv) ((dev_priv)->info.is_i915g)
#define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592)
#define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
#define IS_I945GM(dev_priv) ((dev_priv)->info.is_i945gm)
#define IS_BROADWATER(dev_priv) ((dev_priv)->info.is_broadwater)
#define IS_CRESTLINE(dev_priv) ((dev_priv)->info.is_crestline)
#define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42)
#define IS_G4X(dev_priv) ((dev_priv)->info.is_g4x)
#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_PINEVIEW(dev_priv) ((dev_priv)->info.is_pineview)
#define IS_G33(dev_priv) ((dev_priv)->info.is_g33)
#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge)
#define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \
@ -2659,7 +2757,7 @@ struct drm_i915_cmd_table {
#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake)
#define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton)
#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \
@ -2803,7 +2901,7 @@ struct drm_i915_cmd_table {
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
@ -2820,6 +2918,8 @@ struct drm_i915_cmd_table {
#define HAS_CSR(dev) (INTEL_INFO(dev)->has_csr)
#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
/*
* For now, anything with a GuC requires uCode loading, and then supports
* command submission once loaded. But these are logically independent
@ -2912,6 +3012,7 @@ extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
extern void i915_reset(struct drm_i915_private *dev_priv);
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@ -3095,7 +3196,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load_init(struct drm_device *dev);
int i915_gem_load_init(struct drm_device *dev);
void i915_gem_load_cleanup(struct drm_device *dev);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
@ -3127,70 +3228,85 @@ void i915_vma_close(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);
int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
static inline int __sg_page_count(struct scatterlist *sg)
static inline int __sg_page_count(const struct scatterlist *sg)
{
return sg->length >> PAGE_SHIFT;
}
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
unsigned int n, unsigned int *offset);
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
unsigned int n);
static inline dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
unsigned int n);
dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
unsigned long n);
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
if (n < obj->get_page.last) {
obj->get_page.sg = obj->pages->sgl;
obj->get_page.last = 0;
}
might_lock(&obj->mm.lock);
while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
obj->get_page.last += __sg_page_count(obj->get_page.sg++);
if (unlikely(sg_is_chain(obj->get_page.sg)))
obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
}
if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
return 0;
return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
return __i915_gem_object_get_pages(obj);
}
static inline struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
return NULL;
GEM_BUG_ON(!obj->mm.pages);
if (n < obj->get_page.last) {
obj->get_page.sg = obj->pages->sgl;
obj->get_page.last = 0;
}
while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
obj->get_page.last += __sg_page_count(obj->get_page.sg++);
if (unlikely(sg_is_chain(obj->get_page.sg)))
obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
}
return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
atomic_inc(&obj->mm.pages_pin_count);
}
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(obj->pages == NULL);
obj->pages_pin_count++;
return atomic_read(&obj->mm.pages_pin_count);
}
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(obj->pages_pin_count == 0);
obj->pages_pin_count--;
GEM_BUG_ON(obj->pages_pin_count < obj->bind_count);
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
GEM_BUG_ON(!obj->mm.pages);
atomic_dec(&obj->mm.pages_pin_count);
GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}
static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
__i915_gem_object_unpin_pages(obj);
}
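A sketch, using a hypothetical helper, of how callers are expected to hold a page pin while walking the backing storage now tracked under obj->mm.pages:

static int example_count_pages(struct drm_i915_gem_object *obj)
{
        struct sgt_iter iter;
        struct page *page;
        int count = 0, ret;

        /* may allocate pages and take obj->mm.lock on the slow path */
        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ret;

        for_each_sgt_page(page, iter, obj->mm.pages)
                count++;

        /* once unpinned, the pages may be reaped by the shrinker again */
        i915_gem_object_unpin_pages(obj);
        return count;
}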
enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
I915_MM_NORMAL = 0,
I915_MM_SHRINKER
};
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
enum i915_mm_subclass subclass);
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
enum i915_map_type {
I915_MAP_WB = 0,
I915_MAP_WC,
@ -3206,8 +3322,8 @@ enum i915_map_type {
* the kernel address space. Based on the @type of mapping, the PTE will be
* set to either WriteBack or WriteCombine (via pgprot_t).
*
* The caller must hold the struct_mutex, and is responsible for calling
* i915_gem_object_unpin_map() when the mapping is no longer required.
* The caller is responsible for calling i915_gem_object_unpin_map() when the
* mapping is no longer required.
*
* Returns the pointer through which to access the mapped object, or an
* ERR_PTR() on error.
@ -3223,12 +3339,9 @@ void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
* with your access, call i915_gem_object_unpin_map() to release the pin
* upon the mapping. Once the pin count reaches zero, that mapping may be
* removed.
*
* The caller must hold the struct_mutex.
*/
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
i915_gem_object_unpin_pages(obj);
}
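A sketch of CPU access through the kernel mapping, relying on the relaxed locking documented above (no struct_mutex needed); the helper name and the WriteBack mapping choice are assumptions for illustration:

static int example_fill_object(struct drm_i915_gem_object *obj, u32 value)
{
        unsigned long i;
        u32 *vaddr;

        /* pins the pages and returns a WriteBack kernel vmap */
        vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        for (i = 0; i < obj->base.size / sizeof(*vaddr); i++)
                vaddr[i] = value;

        /* drops the page pin taken by pin_map */
        i915_gem_object_unpin_map(obj);
        return 0;
}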
@ -3261,7 +3374,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);
@ -3300,9 +3413,10 @@ int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
int __must_check i915_gem_suspend(struct drm_device *dev);
void i915_gem_resume(struct drm_device *dev);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout,
struct intel_rps_client *rps);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
@ -3384,6 +3498,7 @@ int __must_check i915_vma_put_fence(struct i915_vma *vma);
static inline bool
i915_vma_pin_fence(struct i915_vma *vma)
{
lockdep_assert_held(&vma->vm->dev->struct_mutex);
if (vma->fence) {
vma->fence->pin_count++;
return true;
@ -3402,6 +3517,7 @@ i915_vma_pin_fence(struct i915_vma *vma)
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
lockdep_assert_held(&vma->vm->dev->struct_mutex);
if (vma->fence) {
GEM_BUG_ON(vma->fence->pin_count <= 0);
vma->fence->pin_count--;
@ -3411,8 +3527,10 @@ i915_vma_unpin_fence(struct i915_vma *vma)
void i915_gem_restore_fences(struct drm_device *dev);
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
@ -3422,6 +3540,9 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct drm_i915_gem_request *req);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
struct i915_vma *
i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
unsigned int flags);
void i915_gem_context_free(struct kref *ctx_ref);
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
@ -3455,6 +3576,16 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_free);
}
static inline struct intel_timeline *
i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct i915_address_space *vm;
vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
return &vm->timeline.engine[engine->id];
}
static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
return c->user_handle == DEFAULT_CONTEXT_HANDLE;
@ -3508,6 +3639,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 gtt_offset,
u32 size);
/* i915_gem_internal.c */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
unsigned int size);
/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
unsigned long target,
@ -3690,7 +3826,7 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv);
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
@ -3745,6 +3881,23 @@ u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
/* intel_dpio_phy.c */
void bxt_port_to_phy_channel(enum port port,
enum dpio_phy *phy, enum dpio_channel *ch);
void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
enum port port, u32 margin, u32 scale,
u32 enable, u32 deemphasis);
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
uint8_t lane_count);
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
uint8_t lane_lat_optim_mask);
uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale);
@ -3834,11 +3987,30 @@ __raw_write(64, q)
#undef __raw_write
/* These are untraced mmio-accessors that are only valid to be used inside
* critical sections inside IRQ handlers where forcewake is explicitly
* critical sections, such as inside IRQ handlers, where forcewake is explicitly
* controlled.
*
* Think twice, and think again, before using these.
* Note: Should only be used between intel_uncore_forcewake_irqlock() and
* intel_uncore_forcewake_irqunlock().
*
* As an example, these accessors can possibly be used between:
*
* spin_lock_irq(&dev_priv->uncore.lock);
* intel_uncore_forcewake_get__locked();
*
* and
*
* intel_uncore_forcewake_put__locked();
* spin_unlock_irq(&dev_priv->uncore.lock);
*
*
* Note: some registers may not need forcewake held, so
* intel_uncore_forcewake_{get,put} can be omitted, see
* intel_uncore_forcewake_for_reg().
*
* Certain architectures will die if the same cacheline is concurrently accessed
* by different clients (e.g. on Ivybridge). Access to registers should
* therefore generally be serialised, by either the dev_priv->uncore.lock or
* a more localised lock guarding all access to that bank of registers.
*/
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
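The pattern spelled out in the comment above, written out as a sketch; FORCEWAKE_ALL is used for brevity where intel_uncore_forcewake_for_reg() could narrow the domains:

static u32 example_read_fw(struct drm_i915_private *dev_priv, i915_reg_t reg)
{
        u32 val;

        spin_lock_irq(&dev_priv->uncore.lock);
        intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

        /* untraced access: lock and forcewake are already held */
        val = I915_READ_FW(reg);

        intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
        spin_unlock_irq(&dev_priv->uncore.lock);

        return val;
}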
@ -3915,7 +4087,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
/* Before we do the heavier coherent read of the seqno,
* check the value (hopefully) in the CPU cacheline.
*/
if (i915_gem_request_completed(req))
if (__i915_gem_request_completed(req))
return true;
/* Ensure our read of the seqno is coherent so that we
@ -3966,7 +4138,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
wake_up_process(tsk);
rcu_read_unlock();
if (i915_gem_request_completed(req))
if (__i915_gem_request_completed(req))
return true;
}

File diff suppressed because it is too large


@ -31,4 +31,6 @@
#define GEM_BUG_ON(expr)
#endif
#define I915_NUM_ENGINES 5
#endif /* __I915_GEM_H__ */


@ -73,7 +73,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
list_for_each_entry_safe(obj, next,
&pool->cache_list[n],
batch_pool_link)
i915_gem_object_put(obj);
__i915_gem_object_release_unless_active(obj);
INIT_LIST_HEAD(&pool->cache_list[n]);
}
@ -97,9 +97,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
size_t size)
{
struct drm_i915_gem_object *obj = NULL;
struct drm_i915_gem_object *tmp, *next;
struct drm_i915_gem_object *tmp;
struct list_head *list;
int n;
int n, ret;
lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
@ -112,40 +112,35 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
n = ARRAY_SIZE(pool->cache_list) - 1;
list = &pool->cache_list[n];
list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
list_for_each_entry(tmp, list, batch_pool_link) {
/* The batches are strictly LRU ordered */
if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
&tmp->base.dev->struct_mutex))
if (i915_gem_object_is_active(tmp))
break;
/* While we're looping, do some clean up */
if (tmp->madv == __I915_MADV_PURGED) {
list_del(&tmp->batch_pool_link);
i915_gem_object_put(tmp);
continue;
}
GEM_BUG_ON(!reservation_object_test_signaled_rcu(tmp->resv,
true));
if (tmp->base.size >= size) {
/* Clear the set of shared fences early */
ww_mutex_lock(&tmp->resv->lock, NULL);
reservation_object_add_excl_fence(tmp->resv, NULL);
ww_mutex_unlock(&tmp->resv->lock);
obj = tmp;
break;
}
}
if (obj == NULL) {
int ret;
obj = i915_gem_object_create(&pool->engine->i915->drm, size);
obj = i915_gem_object_create_internal(pool->engine->i915, size);
if (IS_ERR(obj))
return obj;
ret = i915_gem_object_get_pages(obj);
if (ret)
return ERR_PTR(ret);
obj->madv = I915_MADV_DONTNEED;
}
ret = i915_gem_object_pin_pages(obj);
if (ret)
return ERR_PTR(ret);
list_move_tail(&obj->batch_pool_link, list);
i915_gem_object_pin_pages(obj);
return obj;
}


@ -155,9 +155,10 @@ void i915_gem_context_free(struct kref *ctx_ref)
if (ce->ring)
intel_ring_free(ce->ring);
i915_vma_put(ce->state);
__i915_gem_object_release_unless_active(ce->state->obj);
}
kfree(ctx->name);
put_pid(ctx->pid);
list_del(&ctx->link);
@ -303,19 +304,28 @@ __create_hw_context(struct drm_device *dev,
}
/* Default context will never have a file_priv */
if (file_priv != NULL) {
ret = DEFAULT_CONTEXT_HANDLE;
if (file_priv) {
ret = idr_alloc(&file_priv->context_idr, ctx,
DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
if (ret < 0)
goto err_out;
} else
ret = DEFAULT_CONTEXT_HANDLE;
}
ctx->user_handle = ret;
ctx->file_priv = file_priv;
if (file_priv)
if (file_priv) {
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
current->comm,
pid_nr(ctx->pid),
ctx->user_handle);
if (!ctx->name) {
ret = -ENOMEM;
goto err_pid;
}
}
ctx->user_handle = ret;
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
@ -329,6 +339,9 @@ __create_hw_context(struct drm_device *dev,
return ctx;
err_pid:
put_pid(ctx->pid);
idr_remove(&file_priv->context_idr, ctx->user_handle);
err_out:
context_close(ctx);
return ERR_PTR(ret);
@ -352,9 +365,9 @@ i915_gem_create_context(struct drm_device *dev,
return ctx;
if (USES_FULL_PPGTT(dev)) {
struct i915_hw_ppgtt *ppgtt =
i915_ppgtt_create(to_i915(dev), file_priv);
struct i915_hw_ppgtt *ppgtt;
ppgtt = i915_ppgtt_create(to_i915(dev), file_priv, ctx->name);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@ -751,12 +764,36 @@ needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
return false;
}
struct i915_vma *
i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
unsigned int flags)
{
struct i915_vma *vma = ctx->engine[RCS].state;
int ret;
/* Clear this page out of any CPU caches for coherent swap-in/out.
* We only want to do this on the first bind so that we do not stall
* on an active context (which by nature is already on the GPU).
*/
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
if (ret)
return ERR_PTR(ret);
}
ret = i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags);
if (ret)
return ERR_PTR(ret);
return vma;
}
static int do_rcs_switch(struct drm_i915_gem_request *req)
{
struct i915_gem_context *to = req->ctx;
struct intel_engine_cs *engine = req->engine;
struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
struct i915_vma *vma = to->engine[RCS].state;
struct i915_vma *vma;
struct i915_gem_context *from;
u32 hw_flags;
int ret, i;
@ -764,17 +801,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
if (skip_rcs_switch(ppgtt, engine, to))
return 0;
/* Clear this page out of any CPU caches for coherent swap-in/out. */
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
if (ret)
return ret;
}
/* Trying to pin first makes error handling easier. */
ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
if (ret)
return ret;
vma = i915_gem_context_pin_legacy(to, 0);
if (IS_ERR(vma))
return PTR_ERR(vma);
/*
* Pin can switch back to the default context if we end up calling into
@ -931,22 +961,33 @@ int i915_switch_context(struct drm_i915_gem_request *req)
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
struct i915_gem_timeline *timeline;
enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
for_each_engine(engine, dev_priv, id) {
struct drm_i915_gem_request *req;
int ret;
if (engine->last_context == NULL)
continue;
if (engine->last_context == dev_priv->kernel_context)
continue;
req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
if (IS_ERR(req))
return PTR_ERR(req);
/* Queue this switch after all other activity */
list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
struct drm_i915_gem_request *prev;
struct intel_timeline *tl;
tl = &timeline->engine[engine->id];
prev = i915_gem_active_raw(&tl->last_request,
&dev_priv->drm.struct_mutex);
if (prev)
i915_sw_fence_await_sw_fence_gfp(&req->submit,
&prev->submit,
GFP_KERNEL);
}
ret = i915_switch_context(req);
i915_add_request_no_flush(req);
if (ret)


@ -44,51 +44,42 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
struct scatterlist *src, *dst;
int ret, i;
ret = i915_mutex_lock_interruptible(obj->base.dev);
ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
ret = i915_gem_object_get_pages(obj);
if (ret)
goto err_unlock;
i915_gem_object_pin_pages(obj);
/* Copy sg so that we make an independent mapping */
st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (st == NULL) {
ret = -ENOMEM;
goto err_unpin;
goto err_unpin_pages;
}
ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
if (ret)
goto err_free;
src = obj->pages->sgl;
src = obj->mm.pages->sgl;
dst = st->sgl;
for (i = 0; i < obj->pages->nents; i++) {
for (i = 0; i < obj->mm.pages->nents; i++) {
sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst);
src = sg_next(src);
}
if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
ret =-ENOMEM;
ret = -ENOMEM;
goto err_free_sg;
}
mutex_unlock(&obj->base.dev->struct_mutex);
return st;
err_free_sg:
sg_free_table(st);
err_free:
kfree(st);
err_unpin:
err_unpin_pages:
i915_gem_object_unpin_pages(obj);
err_unlock:
mutex_unlock(&obj->base.dev->struct_mutex);
err:
return ERR_PTR(ret);
}
@ -103,36 +94,21 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
sg_free_table(sg);
kfree(sg);
mutex_lock(&obj->base.dev->struct_mutex);
i915_gem_object_unpin_pages(obj);
mutex_unlock(&obj->base.dev->struct_mutex);
}
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
void *addr;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ERR_PTR(ret);
addr = i915_gem_object_pin_map(obj, I915_MAP_WB);
mutex_unlock(&dev->struct_mutex);
return addr;
return i915_gem_object_pin_map(obj, I915_MAP_WB);
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
mutex_lock(&dev->struct_mutex);
i915_gem_object_unpin_map(obj);
mutex_unlock(&dev->struct_mutex);
}
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
@ -179,32 +155,45 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
int ret;
bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
int err;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
err = i915_gem_object_pin_pages(obj);
if (err)
return err;
ret = i915_gem_object_set_to_cpu_domain(obj, write);
err = i915_mutex_lock_interruptible(dev);
if (err)
goto out;
err = i915_gem_object_set_to_cpu_domain(obj, write);
mutex_unlock(&dev->struct_mutex);
return ret;
out:
i915_gem_object_unpin_pages(obj);
return err;
}
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
int ret;
int err;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
err = i915_gem_object_pin_pages(obj);
if (err)
return err;
ret = i915_gem_object_set_to_gtt_domain(obj, false);
err = i915_mutex_lock_interruptible(dev);
if (err)
goto out;
err = i915_gem_object_set_to_gtt_domain(obj, false);
mutex_unlock(&dev->struct_mutex);
return ret;
out:
i915_gem_object_unpin_pages(obj);
return err;
}
static const struct dma_buf_ops i915_dmabuf_ops = {
@ -222,60 +211,17 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.end_cpu_access = i915_gem_end_cpu_access,
};
static void export_fences(struct drm_i915_gem_object *obj,
struct dma_buf *dma_buf)
{
struct reservation_object *resv = dma_buf->resv;
struct drm_i915_gem_request *req;
unsigned long active;
int idx;
active = __I915_BO_ACTIVE(obj);
if (!active)
return;
/* Serialise with execbuf to prevent concurrent fence-loops */
mutex_lock(&obj->base.dev->struct_mutex);
/* Mark the object for future fences before racily adding old fences */
obj->base.dma_buf = dma_buf;
ww_mutex_lock(&resv->lock, NULL);
for_each_active(active, idx) {
req = i915_gem_active_get(&obj->last_read[idx],
&obj->base.dev->struct_mutex);
if (!req)
continue;
if (reservation_object_reserve_shared(resv) == 0)
reservation_object_add_shared_fence(resv, &req->fence);
i915_gem_request_put(req);
}
req = i915_gem_active_get(&obj->last_write,
&obj->base.dev->struct_mutex);
if (req) {
reservation_object_add_excl_fence(resv, &req->fence);
i915_gem_request_put(req);
}
ww_mutex_unlock(&resv->lock);
mutex_unlock(&obj->base.dev->struct_mutex);
}
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct dma_buf *dma_buf;
exp_info.ops = &i915_dmabuf_ops;
exp_info.size = gem_obj->size;
exp_info.flags = flags;
exp_info.priv = gem_obj;
exp_info.resv = obj->resv;
if (obj->ops->dmabuf_export) {
int ret = obj->ops->dmabuf_export(obj);
@ -283,30 +229,21 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return ERR_PTR(ret);
}
dma_buf = drm_gem_dmabuf_export(dev, &exp_info);
if (IS_ERR(dma_buf))
return dma_buf;
export_fences(obj, dma_buf);
return dma_buf;
return drm_gem_dmabuf_export(dev, &exp_info);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
static struct sg_table *
i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
struct sg_table *sg;
sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sg))
return PTR_ERR(sg);
obj->pages = sg;
return 0;
return dma_buf_map_attachment(obj->base.import_attach,
DMA_BIDIRECTIONAL);
}
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
dma_buf_unmap_attachment(obj->base.import_attach,
obj->pages, DMA_BIDIRECTIONAL);
dma_buf_unmap_attachment(obj->base.import_attach, pages,
DMA_BIDIRECTIONAL);
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
@ -350,6 +287,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
obj->resv = dma_buf->resv;
/* We use GTT as shorthand for a coherent domain, one that is
* neither in the GPU cache nor in the CPU cache, where all


@ -33,14 +33,17 @@
#include "intel_drv.h"
#include "i915_trace.h"
static bool
gpu_is_idle(struct drm_i915_private *dev_priv)
static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id) {
if (intel_engine_is_active(engine))
struct intel_timeline *tl;
tl = &ggtt->base.timeline.engine[engine->id];
if (i915_gem_active_isset(&tl->last_request))
return false;
}
@ -56,7 +59,7 @@ mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
if (WARN_ON(!list_empty(&vma->exec_list)))
return false;
if (flags & PIN_NONFAULT && vma->obj->fault_mappable)
if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
return false;
list_add(&vma->exec_list, unwind);
@ -103,6 +106,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
struct i915_vma *vma, *next;
int ret;
lockdep_assert_held(&vm->dev->struct_mutex);
trace_i915_gem_evict(vm, min_size, alignment, flags);
/*
@ -153,7 +157,7 @@ search_again:
if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
return -ENOSPC;
if (gpu_is_idle(dev_priv)) {
if (ggtt_is_idle(dev_priv)) {
/* If we still have pending pageflip completions, drop
* back to userspace to give our workqueues time to
* acquire our locks and unpin the old scanouts.
@ -213,6 +217,8 @@ i915_gem_evict_for_vma(struct i915_vma *target)
{
struct drm_mm_node *node, *next;
lockdep_assert_held(&target->vm->dev->struct_mutex);
list_for_each_entry_safe(node, next,
&target->vm->mm.head_node.node_list,
node_list) {
@ -266,7 +272,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
struct i915_vma *vma, *next;
int ret;
WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
lockdep_assert_held(&vm->dev->struct_mutex);
trace_i915_gem_evict_vm(vm);
if (do_idle) {


@ -34,7 +34,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
@ -332,7 +331,8 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->page = -1;
cache->vaddr = 0;
cache->i915 = i915;
cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8;
/* Must be a variable in the struct to allow GCC to unroll. */
cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
cache->node.allocated = false;
}
@ -418,15 +418,6 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
unsigned long offset;
void *vaddr;
if (cache->node.allocated) {
wmb();
ggtt->base.insert_page(&ggtt->base,
i915_gem_object_get_dma_address(obj, page),
cache->node.start, I915_CACHE_NONE, 0);
cache->page = page;
return unmask_page(cache->vaddr);
}
if (cache->vaddr) {
io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
} else {
@ -466,6 +457,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset = cache->node.start;
if (cache->node.allocated) {
wmb();
ggtt->base.insert_page(&ggtt->base,
i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0);
@ -1109,44 +1101,20 @@ err:
return ret;
}
static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
{
unsigned int mask;
mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
mask <<= I915_BO_ACTIVE_SHIFT;
return mask;
}
static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
const unsigned int other_rings = eb_other_engines(req);
struct i915_vma *vma;
int ret;
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
struct reservation_object *resv;
if (obj->flags & other_rings) {
ret = i915_gem_request_await_object
(req, obj, obj->base.pending_write_domain);
if (ret)
return ret;
}
resv = i915_gem_object_get_dmabuf_resv(obj);
if (resv) {
ret = i915_sw_fence_await_reservation
(&req->submit, resv, &i915_fence_ops,
obj->base.pending_write_domain, 10*HZ,
GFP_KERNEL | __GFP_NOWARN);
if (ret < 0)
return ret;
}
ret = i915_gem_request_await_object
(req, obj, obj->base.pending_write_domain);
if (ret)
return ret;
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj, false);
@ -1279,6 +1247,12 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
return ctx;
}
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
return !(obj->cache_level == I915_CACHE_NONE ||
obj->cache_level == I915_CACHE_WT);
}
void i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_request *req,
unsigned int flags)
@ -1288,8 +1262,6 @@ void i915_vma_move_to_active(struct i915_vma *vma,
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
obj->dirty = 1; /* be paranoid */
/* Add a reference if we're newly entering the active list.
* The order in which we add operations to the retirement queue is
* vital here: mark_active adds to the start of the callback list,
@ -1297,37 +1269,32 @@ void i915_vma_move_to_active(struct i915_vma *vma,
* add the active reference first and queue for it to be dropped
* *last*.
*/
if (!i915_gem_object_is_active(obj))
i915_gem_object_get(obj);
i915_gem_object_set_active(obj, idx);
i915_gem_active_set(&obj->last_read[idx], req);
if (!i915_vma_is_active(vma))
obj->active_count++;
i915_vma_set_active(vma, idx);
i915_gem_active_set(&vma->last_read[idx], req);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
if (flags & EXEC_OBJECT_WRITE) {
i915_gem_active_set(&obj->last_write, req);
i915_gem_active_set(&vma->last_write, req);
intel_fb_obj_invalidate(obj, ORIGIN_CS);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
if (!obj->cache_dirty && gpu_write_needs_clflush(obj))
obj->cache_dirty = true;
}
if (flags & EXEC_OBJECT_NEEDS_FENCE)
i915_gem_active_set(&vma->last_fence, req);
i915_vma_set_active(vma, idx);
i915_gem_active_set(&vma->last_read[idx], req);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
static void eb_export_fence(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
unsigned int flags)
{
struct reservation_object *resv;
resv = i915_gem_object_get_dmabuf_resv(obj);
if (!resv)
return;
struct reservation_object *resv = obj->resv;
/* Ignore errors from failing to allocate the new fence, we can't
* handle an error right now. Worst case should be missed


@ -343,6 +343,9 @@ i915_vma_get_fence(struct i915_vma *vma)
struct drm_i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
/* Note that we revoke fences on runtime suspend. Therefore the user
* must keep the device awake whilst using the fence.
*/
assert_rpm_wakelock_held(to_i915(vma->vm->dev));
/* Just update our place in the LRU if our fence is getting reused. */
@ -368,19 +371,14 @@ i915_vma_get_fence(struct i915_vma *vma)
* @dev: DRM device
*
* Restore the hw fence state to match the software tracking again, to be called
* after a gpu reset and on resume.
* after a gpu reset and on resume. Note that on runtime suspend we only cancel
* the fences, to be reacquired by the user later.
*/
void i915_gem_restore_fences(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
int i;
/* Note that this may be called outside of struct_mutex, by
* runtime suspend/resume. The barrier we require is enforced by
* rpm itself - all access to fences/GTT are only within an rpm
* wakeref, and to acquire that wakeref you must pass through here.
*/
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
struct i915_vma *vma = reg->vma;
@ -391,7 +389,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
*/
if (vma && !i915_gem_object_is_tiled(vma->obj)) {
GEM_BUG_ON(!reg->dirty);
GEM_BUG_ON(vma->obj->fault_mappable);
GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
list_move(&reg->link, &dev_priv->mm.fence_list);
vma->fence = NULL;
@ -646,6 +644,7 @@ i915_gem_swizzle_page(struct page *page)
/**
* i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
* @obj: i915 GEM buffer object
* @pages: the scattergather list of physical pages
*
* This function fixes up the swizzling in case any page frame number for this
* object has changed in bit 17 since that state has been saved with
@ -656,7 +655,8 @@ i915_gem_swizzle_page(struct page *page)
* by swapping them out and back in again).
*/
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
@ -666,10 +666,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
return;
i = 0;
for_each_sgt_page(page, sgt_iter, obj->pages) {
for_each_sgt_page(page, sgt_iter, pages) {
char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(page);
set_page_dirty(page);
}
@ -680,17 +679,19 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
/**
* i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
* @obj: i915 GEM buffer object
* @pages: the scattergather list of physical pages
*
* This function saves the bit 17 of each page frame number so that swizzling
* can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
* be called before the backing storage can be unpinned.
*/
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
struct sgt_iter sgt_iter;
struct page *page;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (obj->bit_17 == NULL) {
@ -705,7 +706,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
i = 0;
for_each_sgt_page(page, sgt_iter, obj->pages) {
for_each_sgt_page(page, sgt_iter, pages) {
if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17);
else


@ -31,6 +31,7 @@
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
@ -175,7 +176,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
{
u32 pte_flags = 0;
vma->pages = vma->obj->pages;
vma->pages = vma->obj->mm.pages;
/* Currently applicable only to VLV */
if (vma->obj->gt_ro)
@ -706,6 +707,16 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
* the page table structures, we mark them dirty so that
* context switching/execlist queuing code takes extra steps
* to ensure that tlbs are flushed.
*/
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
}
/* Removes entries from a single page table, releasing it if it's empty.
* Caller can use the return value to update higher-level entries.
*/
@ -715,9 +726,9 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
unsigned int pte_start = gen8_pte_index(start);
unsigned int num_entries = gen8_pte_count(start, length);
uint64_t pte;
unsigned int pte = gen8_pte_index(start);
unsigned int pte_end = pte + num_entries;
gen8_pte_t *pt_vaddr;
gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC);
@ -725,7 +736,9 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
if (WARN_ON(!px_page(pt)))
return false;
bitmap_clear(pt->used_ptes, pte_start, num_entries);
GEM_BUG_ON(pte_end > GEN8_PTES);
bitmap_clear(pt->used_ptes, pte, num_entries);
if (bitmap_empty(pt->used_ptes, GEN8_PTES)) {
free_pt(vm->dev, pt);
@ -734,8 +747,8 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
pt_vaddr = kmap_px(pt);
for (pte = pte_start; pte < num_entries; pte++)
pt_vaddr[pte] = scratch_pte;
while (pte < pte_end)
pt_vaddr[pte++] = scratch_pte;
kunmap_px(ppgtt, pt_vaddr);
@ -806,6 +819,8 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
}
}
mark_tlbs_dirty(ppgtt);
if (USES_FULL_48BIT_PPGTT(vm->dev) &&
bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(vm->dev))) {
free_pdp(vm->dev, pdp);
@ -1280,16 +1295,6 @@ err_out:
return -ENOMEM;
}
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
* the page table structures, we mark them dirty so that
* context switching/execlist queuing code takes extra steps
* to ensure that tlbs are flushed.
*/
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
}
static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
uint64_t start,
@ -2184,8 +2189,10 @@ static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
}
static void i915_address_space_init(struct i915_address_space *vm,
struct drm_i915_private *dev_priv)
struct drm_i915_private *dev_priv,
const char *name)
{
i915_gem_timeline_init(dev_priv, &vm->timeline, name);
drm_mm_init(&vm->mm, vm->start, vm->total);
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
@ -2214,14 +2221,15 @@ static void gtt_write_workarounds(struct drm_device *dev)
static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
struct drm_i915_file_private *file_priv,
const char *name)
{
int ret;
ret = __hw_ppgtt_init(ppgtt, dev_priv);
if (ret == 0) {
kref_init(&ppgtt->ref);
i915_address_space_init(&ppgtt->base, dev_priv);
i915_address_space_init(&ppgtt->base, dev_priv, name);
ppgtt->base.file = file_priv;
}
@ -2257,7 +2265,8 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *fpriv)
struct drm_i915_file_private *fpriv,
const char *name)
{
struct i915_hw_ppgtt *ppgtt;
int ret;
@ -2266,7 +2275,7 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
if (!ppgtt)
return ERR_PTR(-ENOMEM);
ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv);
ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv, name);
if (ret) {
kfree(ppgtt);
return ERR_PTR(ret);
@ -2289,6 +2298,7 @@ void i915_ppgtt_release(struct kref *kref)
WARN_ON(!list_empty(&ppgtt->base.inactive_list));
WARN_ON(!list_empty(&ppgtt->base.unbound_list));
i915_gem_timeline_fini(&ppgtt->base.timeline);
list_del(&ppgtt->base.global_link);
drm_mm_takedown(&ppgtt->base.mm);
@ -2370,14 +2380,15 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
i915_ggtt_flush(dev_priv);
}
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
if (!dma_map_sg(&obj->base.dev->pdev->dev,
obj->pages->sgl, obj->pages->nents,
PCI_DMA_BIDIRECTIONAL))
return -ENOSPC;
if (dma_map_sg(&obj->base.dev->pdev->dev,
pages->sgl, pages->nents,
PCI_DMA_BIDIRECTIONAL))
return 0;
return 0;
return -ENOSPC;
}
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
@ -2395,16 +2406,11 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
(offset >> PAGE_SHIFT);
int rpm_atomic_seq;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
gen8_set_pte(pte, gen8_pte_encode(addr, level));
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
@ -2418,11 +2424,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
gen8_pte_t __iomem *gtt_entries;
gen8_pte_t gtt_entry;
dma_addr_t addr;
int rpm_atomic_seq;
int i = 0;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
for_each_sgt_dma(addr, sgt_iter, st) {
@ -2446,8 +2449,6 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
struct insert_entries {
@ -2486,16 +2487,11 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
gen6_pte_t __iomem *pte =
(gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
(offset >> PAGE_SHIFT);
int rpm_atomic_seq;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
iowrite32(vm->pte_encode(addr, level, flags), pte);
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
/*
@ -2515,11 +2511,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
gen6_pte_t __iomem *gtt_entries;
gen6_pte_t gtt_entry;
dma_addr_t addr;
int rpm_atomic_seq;
int i = 0;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
for_each_sgt_dma(addr, sgt_iter, st) {
@ -2542,8 +2535,6 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void nop_clear_range(struct i915_address_space *vm,
@ -2554,7 +2545,6 @@ static void nop_clear_range(struct i915_address_space *vm,
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start, uint64_t length)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
@ -2562,9 +2552,6 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
int i;
int rpm_atomic_seq;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
@ -2576,15 +2563,12 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
for (i = 0; i < num_entries; i++)
gen8_set_pte(&gtt_base[i], scratch_pte);
readl(gtt_base);
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
uint64_t length)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
@ -2592,9 +2576,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
int i;
int rpm_atomic_seq;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
@ -2607,8 +2588,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void i915_ggtt_insert_page(struct i915_address_space *vm,
@ -2617,16 +2596,10 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
enum i915_cache_level cache_level,
u32 unused)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
int rpm_atomic_seq;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
@ -2634,39 +2607,25 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level cache_level, u32 unused)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
int rpm_atomic_seq;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
uint64_t length)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
int rpm_atomic_seq;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
intel_gtt_clear_range(first_entry, num_entries);
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
static int ggtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
struct drm_i915_private *i915 = to_i915(vma->vm->dev);
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags = 0;
int ret;
@ -2679,8 +2638,10 @@ static int ggtt_bind_vma(struct i915_vma *vma,
if (obj->gt_ro)
pte_flags |= PTE_READ_ONLY;
intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
cache_level, pte_flags);
intel_runtime_pm_put(i915);
/*
* Without aliasing PPGTT there's no difference between
@ -2696,6 +2657,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
struct drm_i915_private *i915 = to_i915(vma->vm->dev);
u32 pte_flags;
int ret;
@ -2710,14 +2672,15 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
if (flags & I915_VMA_GLOBAL_BIND) {
intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm,
vma->pages, vma->node.start,
cache_level, pte_flags);
intel_runtime_pm_put(i915);
}
if (flags & I915_VMA_LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt =
to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
vma->pages, vma->node.start,
cache_level, pte_flags);
@ -2728,19 +2691,24 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
static void ggtt_unbind_vma(struct i915_vma *vma)
{
struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
struct drm_i915_private *i915 = to_i915(vma->vm->dev);
struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
const u64 size = min(vma->size, vma->node.size);
if (vma->flags & I915_VMA_GLOBAL_BIND)
if (vma->flags & I915_VMA_GLOBAL_BIND) {
intel_runtime_pm_get(i915);
vma->vm->clear_range(vma->vm,
vma->node.start, size);
intel_runtime_pm_put(i915);
}
if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
appgtt->base.clear_range(&appgtt->base,
vma->node.start, size);
}
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct device *kdev = &dev_priv->drm.pdev->dev;
@ -2754,8 +2722,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
}
}
dma_unmap_sg(kdev, obj->pages->sgl, obj->pages->nents,
PCI_DMA_BIDIRECTIONAL);
dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
static void i915_gtt_color_adjust(struct drm_mm_node *node,
@ -3274,11 +3241,13 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
/* Subtract the guard page before address space initialization to
* shrink the range used by drm_mm.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
ggtt->base.total -= PAGE_SIZE;
i915_address_space_init(&ggtt->base, dev_priv);
i915_address_space_init(&ggtt->base, dev_priv, "[global]");
ggtt->base.total += PAGE_SIZE;
if (!HAS_LLC(dev_priv))
ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
mutex_unlock(&dev_priv->drm.struct_mutex);
if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
dev_priv->ggtt.mappable_base,
@ -3327,7 +3296,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry_safe(obj, on,
&dev_priv->mm.bound_list, global_list) {
&dev_priv->mm.bound_list, global_link) {
bool ggtt_bound = false;
struct i915_vma *vma;
@ -3386,6 +3355,7 @@ i915_vma_retire(struct i915_gem_active *active,
const unsigned int idx = rq->engine->id;
struct i915_vma *vma =
container_of(active, struct i915_vma, last_read[idx]);
struct drm_i915_gem_object *obj = vma->obj;
GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
@ -3396,6 +3366,34 @@ i915_vma_retire(struct i915_gem_active *active,
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
WARN_ON(i915_vma_unbind(vma));
GEM_BUG_ON(!i915_gem_object_is_active(obj));
if (--obj->active_count)
return;
/* Bump our place on the bound list to keep it roughly in LRU order
* so that we don't steal from recently used but inactive objects
* (unless we are forced to ofc!)
*/
if (obj->bind_count)
list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
obj->mm.dirty = true; /* be paranoid */
if (i915_gem_object_has_active_reference(obj)) {
i915_gem_object_clear_active_reference(obj);
i915_gem_object_put(obj);
}
}
static void
i915_ggtt_retire__write(struct i915_gem_active *active,
struct drm_i915_gem_request *request)
{
struct i915_vma *vma =
container_of(active, struct i915_vma, last_write);
intel_fb_obj_flush(vma->obj, true, ORIGIN_CS);
}
void i915_vma_destroy(struct i915_vma *vma)
@ -3417,17 +3415,40 @@ void i915_vma_close(struct i915_vma *vma)
GEM_BUG_ON(i915_vma_is_closed(vma));
vma->flags |= I915_VMA_CLOSED;
list_del_init(&vma->obj_link);
list_del(&vma->obj_link);
rb_erase(&vma->obj_node, &vma->obj->vma_tree);
if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
WARN_ON(i915_vma_unbind(vma));
}
static inline long vma_compare(struct i915_vma *vma,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
GEM_BUG_ON(view && !i915_is_ggtt(vm));
if (vma->vm != vm)
return vma->vm - vm;
if (!view)
return vma->ggtt_view.type;
if (vma->ggtt_view.type != view->type)
return vma->ggtt_view.type - view->type;
return memcmp(&vma->ggtt_view.params,
&view->params,
sizeof(view->params));
}
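(Aside, not part of the patch: the sign convention of vma_compare() defines a single total order over (vm, ggtt_view) that both the rb-tree insertion in __i915_vma_create() and the lookup in i915_gem_obj_to_vma() below rely on, replacing the old linear walk over obj->vma_list.)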
static struct i915_vma *
__i915_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
struct rb_node *rb, **p;
int i;
GEM_BUG_ON(vm->closed);
@@ -3439,6 +3460,8 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&vma->exec_list);
for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
init_request_active(&vma->last_read[i], i915_vma_retire);
init_request_active(&vma->last_write,
i915_is_ggtt(vm) ? i915_ggtt_retire__write : NULL);
init_request_active(&vma->last_fence, NULL);
list_add(&vma->vm_link, &vm->unbound_list);
vma->vm = vm;
@@ -3459,40 +3482,36 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
if (i915_is_ggtt(vm)) {
vma->flags |= I915_VMA_GGTT;
list_add(&vma->obj_link, &obj->vma_list);
} else {
i915_ppgtt_get(i915_vm_to_ppgtt(vm));
list_add_tail(&vma->obj_link, &obj->vma_list);
}
list_add_tail(&vma->obj_link, &obj->vma_list);
rb = NULL;
p = &obj->vma_tree.rb_node;
while (*p) {
struct i915_vma *pos;
rb = *p;
pos = rb_entry(rb, struct i915_vma, obj_node);
if (vma_compare(pos, vm, view) < 0)
p = &rb->rb_right;
else
p = &rb->rb_left;
}
rb_link_node(&vma->obj_node, rb, p);
rb_insert_color(&vma->obj_node, &obj->vma_tree);
return vma;
}
static inline bool vma_matches(struct i915_vma *vma,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
if (vma->vm != vm)
return false;
if (!i915_vma_is_ggtt(vma))
return true;
if (!view)
return vma->ggtt_view.type == 0;
if (vma->ggtt_view.type != view->type)
return false;
return memcmp(&vma->ggtt_view.params,
&view->params,
sizeof(view->params)) == 0;
}
struct i915_vma *
i915_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(view && !i915_is_ggtt(vm));
GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
@@ -3504,12 +3523,23 @@ i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
struct rb_node *rb;
list_for_each_entry_reverse(vma, &obj->vma_list, obj_link)
if (vma_matches(vma, vm, view))
rb = obj->vma_tree.rb_node;
while (rb) {
struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
long cmp;
cmp = vma_compare(vma, vm, view);
if (cmp == 0)
return vma;
if (cmp < 0)
rb = rb->rb_right;
else
rb = rb->rb_left;
}
return NULL;
}
@@ -3520,11 +3550,14 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma;
lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(view && !i915_is_ggtt(vm));
vma = i915_gem_obj_to_vma(obj, vm, view);
if (!vma)
if (!vma) {
vma = __i915_vma_create(obj, vm, view);
GEM_BUG_ON(vma != i915_gem_obj_to_vma(obj, vm, view));
}
GEM_BUG_ON(i915_vma_is_closed(vma));
return vma;
@@ -3590,7 +3623,7 @@ intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
/* Populate source page list from the object. */
i = 0;
for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
page_addr_list[i++] = dma_addr;
GEM_BUG_ON(i != n_pages);
@@ -3626,35 +3659,47 @@ intel_partial_pages(const struct i915_ggtt_view *view,
struct drm_i915_gem_object *obj)
{
struct sg_table *st;
struct scatterlist *sg;
struct sg_page_iter obj_sg_iter;
struct scatterlist *sg, *iter;
unsigned int count = view->params.partial.size;
unsigned int offset;
int ret = -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
goto err_st_alloc;
ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
ret = sg_alloc_table(st, count, GFP_KERNEL);
if (ret)
goto err_sg_alloc;
iter = i915_gem_object_get_sg(obj,
view->params.partial.offset,
&offset);
GEM_BUG_ON(!iter);
sg = st->sgl;
st->nents = 0;
for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
view->params.partial.offset)
{
if (st->nents >= view->params.partial.size)
break;
do {
unsigned int len;
sg_set_page(sg, NULL, PAGE_SIZE, 0);
sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
sg_dma_len(sg) = PAGE_SIZE;
len = min(iter->length - (offset << PAGE_SHIFT),
count << PAGE_SHIFT);
sg_set_page(sg, NULL, len, 0);
sg_dma_address(sg) =
sg_dma_address(iter) + (offset << PAGE_SHIFT);
sg_dma_len(sg) = len;
sg = sg_next(sg);
st->nents++;
}
count -= len >> PAGE_SHIFT;
if (count == 0) {
sg_mark_end(sg);
return st;
}
return st;
sg = __sg_next(sg);
iter = __sg_next(iter);
offset = 0;
} while (1);
err_sg_alloc:
kfree(st);
@@ -3667,11 +3712,18 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
int ret = 0;
/* The vma->pages are only valid within the lifespan of the borrowed
* obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
* must be the vma->pages. A simple rule is that vma->pages must only
* be accessed when the obj->mm.pages are pinned.
*/
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
if (vma->pages)
return 0;
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
vma->pages = vma->obj->pages;
vma->pages = vma->obj->mm.pages;
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
vma->pages =
intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
@@ -3778,11 +3830,16 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
struct i915_vma *vma;
struct drm_i915_gem_object *obj;
vma = fetch_and_zero(p_vma);
if (!vma)
return;
obj = vma->obj;
i915_vma_unpin(vma);
i915_vma_put(vma);
i915_vma_close(vma);
__i915_gem_object_release_unless_active(obj);
}


@@ -211,6 +211,7 @@ struct i915_vma {
unsigned int active;
struct i915_gem_active last_read[I915_NUM_ENGINES];
struct i915_gem_active last_write;
struct i915_gem_active last_fence;
/**
@@ -226,6 +227,7 @@ struct i915_vma {
struct list_head vm_link;
struct list_head obj_link; /* Link in the object's VMA list */
struct rb_node obj_node;
/** This vma's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
@@ -341,6 +343,7 @@ struct i915_pml4 {
struct i915_address_space {
struct drm_mm mm;
struct i915_gem_timeline timeline;
struct drm_device *dev;
/* Every address space belongs to a struct file - except for the global
* GTT that is owned by the driver (and so @file is set to NULL). In
@@ -612,7 +615,8 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
int i915_ppgtt_init_hw(struct drm_device *dev);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *fpriv);
struct drm_i915_file_private *fpriv,
const char *name);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
@@ -628,8 +632,10 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
/* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK BIT(0)


@@ -0,0 +1,170 @@
/*
* Copyright © 2014-2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
/* convert swiotlb segment size into sensible units (pages)! */
#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
static void internal_free_pages(struct sg_table *st)
{
struct scatterlist *sg;
for (sg = st->sgl; sg; sg = __sg_next(sg))
__free_pages(sg_page(sg), get_order(sg->length));
sg_free_table(st);
kfree(st);
}
static struct sg_table *
i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
unsigned int npages = obj->base.size / PAGE_SIZE;
struct sg_table *st;
struct scatterlist *sg;
int max_order;
gfp_t gfp;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return ERR_PTR(-ENOMEM);
if (sg_alloc_table(st, npages, GFP_KERNEL)) {
kfree(st);
return ERR_PTR(-ENOMEM);
}
sg = st->sgl;
st->nents = 0;
max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
#endif
gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
if (IS_CRESTLINE(i915) || IS_BROADWATER(i915)) {
/* 965gm cannot relocate objects above 4GiB. */
gfp &= ~__GFP_HIGHMEM;
gfp |= __GFP_DMA32;
}
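/* Greedily grab the largest physically contiguous chunk that still fits
 * the remaining page count, falling back to smaller orders on failure and
 * lowering the ceiling so later iterations do not retry an order that has
 * already failed.
 */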
do {
int order = min(fls(npages) - 1, max_order);
struct page *page;
do {
page = alloc_pages(gfp | (order ? QUIET : 0), order);
if (page)
break;
if (!order--)
goto err;
/* Limit subsequent allocations as well */
max_order = order;
} while (1);
sg_set_page(sg, page, PAGE_SIZE << order, 0);
st->nents++;
npages -= 1 << order;
if (!npages) {
sg_mark_end(sg);
break;
}
sg = __sg_next(sg);
} while (1);
if (i915_gem_gtt_prepare_pages(obj, st))
goto err;
/* Mark the pages as dontneed whilst they are still pinned. As soon
* as they are unpinned they are allowed to be reaped by the shrinker,
* and the caller is expected to repopulate - the contents of this
* object are only valid whilst active and pinned.
*/
obj->mm.madv = I915_MADV_DONTNEED;
return st;
err:
sg_mark_end(sg);
internal_free_pages(st);
return ERR_PTR(-ENOMEM);
}
static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
i915_gem_gtt_finish_pages(obj, pages);
internal_free_pages(pages);
obj->mm.dirty = false;
obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = i915_gem_object_get_pages_internal,
.put_pages = i915_gem_object_put_pages_internal,
};
/**
* Creates a new object that wraps some internal memory for private use.
* This object is not backed by swappable storage, and as such its contents
* are volatile and only valid whilst pinned. If the object is reaped by the
* shrinker, its pages and data will be discarded. Equally, it is not a full
* GEM object and so not valid for access from userspace. This makes it useful
* for hardware interfaces like ringbuffers (which are pinned from the time
* the request is written to the time the hardware stops accessing it), but
* not for contexts (which need to be preserved when not active for later
* reuse). Note that it is not cleared upon allocation.
*/
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
unsigned int size)
{
struct drm_i915_gem_object *obj;
obj = i915_gem_object_alloc(&i915->drm);
if (!obj)
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_object_internal_ops);
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
return obj;
}
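As an aside (not part of the patch), the expected usage pattern is the one the render-state code adopts later in this series: create the object, pin it into the global GTT for as long as the hardware needs it, and treat the contents as lost once unpinned. A rough sketch, with i915, obj, vma and err purely illustrative:

	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(i915, 4096);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_create(obj, &i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return PTR_ERR(vma);
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err) {
		i915_gem_object_put(obj);
		return err;
	}

	/* ... fill the pages and point the hardware at vma->node.start ... */

	i915_vma_unpin(vma);
	/* from here on the shrinker may discard the contents */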


@@ -28,17 +28,19 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
struct render_state {
struct intel_render_state {
const struct intel_renderstate_rodata *rodata;
struct i915_vma *vma;
u32 aux_batch_size;
u32 aux_batch_offset;
u32 batch_offset;
u32 batch_size;
u32 aux_offset;
u32 aux_size;
};
static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct drm_i915_gem_request *req)
render_state_get_rodata(const struct intel_engine_cs *engine)
{
switch (INTEL_GEN(req->i915)) {
switch (INTEL_GEN(engine->i915)) {
case 6:
return &gen6_null_state;
case 7:
@@ -63,29 +65,26 @@ render_state_get_rodata(const struct drm_i915_gem_request *req)
*/
#define OUT_BATCH(batch, i, val) \
do { \
if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) { \
ret = -ENOSPC; \
goto err_out; \
} \
if ((i) >= PAGE_SIZE / sizeof(u32)) \
goto err; \
(batch)[(i)++] = (val); \
} while(0)
static int render_state_setup(struct render_state *so)
static int render_state_setup(struct intel_render_state *so,
struct drm_i915_private *i915)
{
struct drm_i915_private *dev_priv = to_i915(so->vma->vm->dev);
const struct intel_renderstate_rodata *rodata = so->rodata;
const bool has_64bit_reloc = INTEL_GEN(dev_priv) >= 8;
struct drm_i915_gem_object *obj = so->vma->obj;
unsigned int i = 0, reloc_index = 0;
struct page *page;
unsigned int needs_clflush;
u32 *d;
int ret;
ret = i915_gem_object_set_to_cpu_domain(so->vma->obj, true);
ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
if (ret)
return ret;
page = i915_gem_object_get_dirty_page(so->vma->obj, 0);
d = kmap(page);
d = kmap_atomic(i915_gem_object_get_dirty_page(obj, 0));
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
@@ -93,12 +92,10 @@ static int render_state_setup(struct render_state *so)
if (i * 4 == rodata->reloc[reloc_index]) {
u64 r = s + so->vma->node.start;
s = lower_32_bits(r);
if (has_64bit_reloc) {
if (HAS_64BIT_RELOC(i915)) {
if (i + 1 >= rodata->batch_items ||
rodata->batch[i + 1] != 0) {
ret = -EINVAL;
goto err_out;
}
rodata->batch[i + 1] != 0)
goto err;
d[i++] = s;
s = upper_32_bits(r);
@@ -110,12 +107,20 @@ static int render_state_setup(struct render_state *so)
d[i++] = s;
}
if (rodata->reloc[reloc_index] != -1) {
DRM_ERROR("only %d relocs resolved\n", reloc_index);
goto err;
}
so->batch_offset = so->vma->node.start;
so->batch_size = rodata->batch_items * sizeof(u32);
while (i % CACHELINE_DWORDS)
OUT_BATCH(d, i, MI_NOOP);
so->aux_batch_offset = i * sizeof(u32);
so->aux_offset = i * sizeof(u32);
if (HAS_POOLED_EU(dev_priv)) {
if (HAS_POOLED_EU(i915)) {
/*
* We always program 3x6 pool config but depending upon which
* subslice is disabled HW drops down to appropriate config
@@ -143,88 +148,133 @@ static int render_state_setup(struct render_state *so)
}
OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
so->aux_size = i * sizeof(u32) - so->aux_offset;
so->aux_offset += so->batch_offset;
/*
* Since we are sending length, we need to strictly conform to
* all requirements. For Gen2 this must be a multiple of 8.
*/
so->aux_batch_size = ALIGN(so->aux_batch_size, 8);
so->aux_size = ALIGN(so->aux_size, 8);
kunmap(page);
if (needs_clflush)
drm_clflush_virt_range(d, i * sizeof(u32));
kunmap_atomic(d);
ret = i915_gem_object_set_to_gtt_domain(so->vma->obj, false);
if (ret)
return ret;
if (rodata->reloc[reloc_index] != -1) {
DRM_ERROR("only %d relocs resolved\n", reloc_index);
return -EINVAL;
}
return 0;
err_out:
kunmap(page);
ret = i915_gem_object_set_to_gtt_domain(obj, false);
out:
i915_gem_obj_finish_shmem_access(obj);
return ret;
err:
kunmap_atomic(d);
ret = -EINVAL;
goto out;
}
#undef OUT_BATCH
int i915_gem_render_state_init(struct drm_i915_gem_request *req)
int i915_gem_render_state_init(struct intel_engine_cs *engine)
{
struct render_state so;
struct intel_render_state *so;
const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
int ret;
if (WARN_ON(req->engine->id != RCS))
return -ENOENT;
so.rodata = render_state_get_rodata(req);
if (!so.rodata)
if (engine->id != RCS)
return 0;
if (so.rodata->batch_items * 4 > 4096)
rodata = render_state_get_rodata(engine);
if (!rodata)
return 0;
if (rodata->batch_items * 4 > 4096)
return -EINVAL;
obj = i915_gem_object_create(&req->i915->drm, 4096);
if (IS_ERR(obj))
return PTR_ERR(obj);
so = kmalloc(sizeof(*so), GFP_KERNEL);
if (!so)
return -ENOMEM;
so.vma = i915_vma_create(obj, &req->i915->ggtt.base, NULL);
if (IS_ERR(so.vma)) {
ret = PTR_ERR(so.vma);
obj = i915_gem_object_create_internal(engine->i915, 4096);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err_free;
}
so->vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(so->vma)) {
ret = PTR_ERR(so->vma);
goto err_obj;
}
ret = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL);
if (ret)
goto err_obj;
so->rodata = rodata;
engine->render_state = so;
return 0;
ret = render_state_setup(&so);
if (ret)
goto err_unpin;
err_obj:
i915_gem_object_put(obj);
err_free:
kfree(so);
return ret;
}
ret = req->engine->emit_bb_start(req, so.vma->node.start,
so.rodata->batch_items * 4,
int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
{
struct intel_render_state *so;
int ret;
lockdep_assert_held(&req->i915->drm.struct_mutex);
so = req->engine->render_state;
if (!so)
return 0;
/* Recreate the page after shrinking */
if (!so->vma->obj->mm.pages)
so->batch_offset = -1;
ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (ret)
return ret;
if (so->vma->node.start != so->batch_offset) {
ret = render_state_setup(so, req->i915);
if (ret)
goto err_unpin;
}
ret = req->engine->emit_bb_start(req,
so->batch_offset, so->batch_size,
I915_DISPATCH_SECURE);
if (ret)
goto err_unpin;
if (so.aux_batch_size > 8) {
if (so->aux_size > 8) {
ret = req->engine->emit_bb_start(req,
(so.vma->node.start +
so.aux_batch_offset),
so.aux_batch_size,
so->aux_offset, so->aux_size,
I915_DISPATCH_SECURE);
if (ret)
goto err_unpin;
}
i915_vma_move_to_active(so.vma, req, 0);
i915_vma_move_to_active(so->vma, req, 0);
err_unpin:
i915_vma_unpin(so.vma);
err_obj:
i915_gem_object_put(obj);
i915_vma_unpin(so->vma);
return ret;
}
void i915_gem_render_state_fini(struct intel_engine_cs *engine)
{
struct intel_render_state *so;
struct drm_i915_gem_object *obj;
so = fetch_and_zero(&engine->render_state);
if (!so)
return;
obj = so->vma->obj;
i915_vma_close(so->vma);
__i915_gem_object_release_unless_active(obj);
kfree(so);
}


@@ -26,6 +26,8 @@
struct drm_i915_gem_request;
int i915_gem_render_state_init(struct drm_i915_gem_request *req);
int i915_gem_render_state_init(struct intel_engine_cs *engine);
int i915_gem_render_state_emit(struct drm_i915_gem_request *req);
void i915_gem_render_state_fini(struct intel_engine_cs *engine);
#endif /* _I915_GEM_RENDER_STATE_H_ */


@@ -23,6 +23,7 @@
*/
#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
#include "i915_drv.h"
@@ -33,13 +34,7 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
/* Timelines are bound by eviction to a VM. However, since
* we only have a global seqno at the moment, we only have
* a single timeline. Note that each timeline will have
* multiple execution contexts (fence contexts) as we allow
* engines within a single timeline to execute in parallel.
*/
return "global";
return to_request(fence)->timeline->common->name;
}
static bool i915_fence_signaled(struct dma_fence *fence)
@@ -58,43 +53,9 @@ static bool i915_fence_enable_signaling(struct dma_fence *fence)
static signed long i915_fence_wait(struct dma_fence *fence,
bool interruptible,
signed long timeout_jiffies)
signed long timeout)
{
s64 timeout_ns, *timeout;
int ret;
if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
timeout_ns = jiffies_to_nsecs(timeout_jiffies);
timeout = &timeout_ns;
} else {
timeout = NULL;
}
ret = i915_wait_request(to_request(fence),
interruptible, timeout,
NO_WAITBOOST);
if (ret == -ETIME)
return 0;
if (ret < 0)
return ret;
if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
timeout_jiffies = nsecs_to_jiffies(timeout_ns);
return timeout_jiffies;
}
static void i915_fence_value_str(struct dma_fence *fence, char *str, int size)
{
snprintf(str, size, "%u", fence->seqno);
}
static void i915_fence_timeline_value_str(struct dma_fence *fence, char *str,
int size)
{
snprintf(str, size, "%u",
intel_engine_get_seqno(to_request(fence)->engine));
return i915_wait_request(to_request(fence), interruptible, timeout);
}
static void i915_fence_release(struct dma_fence *fence)
@@ -111,8 +72,6 @@ const struct dma_fence_ops i915_fence_ops = {
.signaled = i915_fence_signaled,
.wait = i915_fence_wait,
.release = i915_fence_release,
.fence_value_str = i915_fence_value_str,
.timeline_value_str = i915_fence_timeline_value_str,
};
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
@@ -164,8 +123,14 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
struct i915_gem_active *active, *next;
lockdep_assert_held(&request->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_gem_request_completed(request));
trace_i915_gem_request_retire(request);
list_del(&request->link);
spin_lock_irq(&request->engine->timeline->lock);
list_del_init(&request->link);
spin_unlock_irq(&request->engine->timeline->lock);
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
@@ -177,6 +142,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
*/
list_del(&request->ring_link);
request->ring->last_retired_head = request->postfix;
request->i915->gt.active_requests--;
/* Walk through the active list, calling retire on each. This allows
* objects to track their GPU activity and mark themselves as idle
@@ -214,6 +180,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
}
i915_gem_context_put(request->ctx);
dma_fence_signal(&request->fence);
i915_gem_request_put(request);
}
@@ -223,10 +191,11 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
struct drm_i915_gem_request *tmp;
lockdep_assert_held(&req->i915->drm.struct_mutex);
GEM_BUG_ON(list_empty(&req->link));
if (list_empty(&req->link))
return;
do {
tmp = list_first_entry(&engine->request_list,
tmp = list_first_entry(&engine->timeline->requests,
typeof(*tmp), link);
i915_gem_request_retire(tmp);
@@ -253,40 +222,51 @@ static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
return 0;
}
static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
/* Carefully retire all requests without writing to the rings */
for_each_engine(engine, dev_priv, id) {
ret = intel_engine_idle(engine,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (ret)
return ret;
}
i915_gem_retire_requests(dev_priv);
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (ret)
return ret;
i915_gem_retire_requests(i915);
GEM_BUG_ON(i915->gt.active_requests > 1);
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
while (intel_kick_waiters(dev_priv) ||
intel_kick_signalers(dev_priv))
if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
while (intel_kick_waiters(i915) || intel_kick_signalers(i915))
yield();
yield();
}
atomic_set(&timeline->next_seqno, seqno);
/* Finally reset hw state */
for_each_engine(engine, dev_priv, id)
intel_engine_init_seqno(engine, seqno);
for_each_engine(engine, i915, id)
intel_engine_init_global_seqno(engine, seqno);
list_for_each_entry(timeline, &i915->gt.timelines, link) {
for_each_engine(engine, i915, id) {
struct intel_timeline *tl = &timeline->engine[id];
memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
}
}
return 0;
}
int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (seqno == 0)
return -EINVAL;
@@ -294,29 +274,37 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
/* HWS page needs to be set less than what we
* will inject to ring
*/
ret = i915_gem_init_seqno(dev_priv, seqno - 1);
if (ret)
return ret;
return i915_gem_init_global_seqno(dev_priv, seqno - 1);
}
static int reserve_global_seqno(struct drm_i915_private *i915)
{
u32 active_requests = ++i915->gt.active_requests;
u32 next_seqno = atomic_read(&i915->gt.global_timeline.next_seqno);
int ret;
/* Reservation is fine until we need to wrap around */
if (likely(next_seqno + active_requests > next_seqno))
return 0;
ret = i915_gem_init_global_seqno(i915, 0);
if (ret) {
i915->gt.active_requests--;
return ret;
}
dev_priv->next_seqno = seqno;
return 0;
}
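(Aside, not part of the patch: the unsigned comparison above is the wraparound guard. For example, with next_seqno at 0xfffffffe and two outstanding requests the u32 sum wraps to 0, the fast path is skipped and i915_gem_init_global_seqno() restarts the seqno space before this request is allocated.)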
static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
{
/* reserve 0 for non-seqno */
if (unlikely(dev_priv->next_seqno == 0)) {
int ret;
/* next_seqno only incremented under a mutex */
return ++tl->next_seqno.counter;
}
ret = i915_gem_init_seqno(dev_priv, 0);
if (ret)
return ret;
dev_priv->next_seqno = 1;
}
*seqno = dev_priv->next_seqno++;
return 0;
static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
{
return atomic_inc_return(&tl->next_seqno);
}
static int __i915_sw_fence_call
@@ -324,18 +312,46 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct drm_i915_gem_request *request =
container_of(fence, typeof(*request), submit);
struct intel_engine_cs *engine = request->engine;
struct intel_timeline *timeline;
unsigned long flags;
u32 seqno;
if (state != FENCE_COMPLETE)
return NOTIFY_DONE;
/* Transfer from per-context onto the global per-engine timeline */
timeline = engine->timeline;
GEM_BUG_ON(timeline == request->timeline);
/* Will be called from irq-context when using foreign DMA fences */
spin_lock_irqsave(&timeline->lock, flags);
switch (state) {
case FENCE_COMPLETE:
request->engine->last_submitted_seqno = request->fence.seqno;
request->engine->submit_request(request);
break;
seqno = timeline_get_seqno(timeline->common);
GEM_BUG_ON(!seqno);
GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
case FENCE_FREE:
break;
}
GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
request->previous_seqno = timeline->last_submitted_seqno;
timeline->last_submitted_seqno = seqno;
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
request->global_seqno = seqno;
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
intel_engine_enable_signaling(request);
spin_unlock(&request->lock);
GEM_BUG_ON(!request->global_seqno);
engine->emit_breadcrumb(request,
request->ring->vaddr + request->postfix);
engine->submit_request(request);
spin_lock_nested(&request->timeline->lock, SINGLE_DEPTH_NESTING);
list_move_tail(&request->link, &timeline->requests);
spin_unlock(&request->timeline->lock);
spin_unlock_irqrestore(&timeline->lock, flags);
return NOTIFY_DONE;
}
@@ -358,9 +374,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
{
struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_request *req;
u32 seqno;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
* and restart.
@@ -369,10 +386,14 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
if (ret)
return ERR_PTR(ret);
ret = reserve_global_seqno(dev_priv);
if (ret)
return ERR_PTR(ret);
/* Move the oldest request to the slab-cache (if not in use!) */
req = list_first_entry_or_null(&engine->request_list,
req = list_first_entry_or_null(&engine->timeline->requests,
typeof(*req), link);
if (req && i915_gem_request_completed(req))
if (req && __i915_gem_request_completed(req))
i915_gem_request_retire(req);
/* Beware: Dragons be flying overhead.
@@ -383,7 +404,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* of being read by __i915_gem_active_get_rcu(). As such,
* we have to be very careful when overwriting the contents. During
* the RCU lookup, we change chase the request->engine pointer,
* read the request->fence.seqno and increment the reference count.
* read the request->global_seqno and increment the reference count.
*
* The reference count is incremented atomically. If it is zero,
* the lookup knows the request is unallocated and complete. Otherwise,
@@ -404,19 +425,20 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* Do not use kmem_cache_zalloc() here!
*/
req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
if (!req)
return ERR_PTR(-ENOMEM);
if (!req) {
ret = -ENOMEM;
goto err_unreserve;
}
ret = i915_gem_get_seqno(dev_priv, &seqno);
if (ret)
goto err;
req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
GEM_BUG_ON(req->timeline == engine->timeline);
spin_lock_init(&req->lock);
dma_fence_init(&req->fence,
&i915_fence_ops,
&req->lock,
engine->fence_context,
seqno);
req->timeline->fence_context,
__timeline_get_seqno(req->timeline->common));
i915_sw_fence_init(&req->submit, submit_notify);
@@ -426,6 +448,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
req->ctx = i915_gem_context_get(ctx);
/* No zalloc, must clear what we need by hand */
req->global_seqno = 0;
req->previous_context = NULL;
req->file_priv = NULL;
req->batch = NULL;
@@ -438,6 +461,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* away, e.g. because a GPU scheduler has deferred it.
*/
req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
if (i915.enable_execlists)
ret = intel_logical_ring_alloc_request_extras(req);
@@ -457,8 +481,9 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
err_ctx:
i915_gem_context_put(ctx);
err:
kmem_cache_free(dev_priv->requests, req);
err_unreserve:
dev_priv->gt.active_requests--;
return ERR_PTR(ret);
}
@@ -466,15 +491,28 @@ static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
struct drm_i915_gem_request *from)
{
int idx, ret;
int ret;
GEM_BUG_ON(to == from);
if (to->engine == from->engine)
if (to->timeline == from->timeline)
return 0;
idx = intel_engine_sync_index(from->engine, to->engine);
if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
if (to->engine == from->engine) {
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
GFP_KERNEL);
return ret < 0 ? ret : 0;
}
if (!from->global_seqno) {
ret = i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
GFP_KERNEL);
return ret < 0 ? ret : 0;
}
if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
return 0;
trace_i915_gem_ring_sync_to(to, from);
@@ -492,7 +530,54 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
return ret;
}
from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
return 0;
}
int
i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
struct dma_fence *fence)
{
struct dma_fence_array *array;
int ret;
int i;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return 0;
if (dma_fence_is_i915(fence))
return i915_gem_request_await_request(req, to_request(fence));
if (!dma_fence_is_array(fence)) {
ret = i915_sw_fence_await_dma_fence(&req->submit,
fence, I915_FENCE_TIMEOUT,
GFP_KERNEL);
return ret < 0 ? ret : 0;
}
/* Note that if the fence-array was created in signal-on-any mode,
* we should *not* decompose it into its individual fences. However,
* we don't currently store which mode the fence-array is operating
* in. Fortunately, the only user of signal-on-any is private to
* amdgpu and we should not see any incoming fence-array from
* sync-file being in signal-on-any mode.
*/
array = to_dma_fence_array(fence);
for (i = 0; i < array->num_fences; i++) {
struct dma_fence *child = array->fences[i];
if (dma_fence_is_i915(child))
ret = i915_gem_request_await_request(req,
to_request(child));
else
ret = i915_sw_fence_await_dma_fence(&req->submit,
child, I915_FENCE_TIMEOUT,
GFP_KERNEL);
if (ret < 0)
return ret;
}
return 0;
}
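As an illustrative aside (not part of the patch), a caller holding a foreign fence, for example one taken out of a sync_file, would feed it in through this helper; in_fd, req and err are placeholders:

	struct dma_fence *in_fence = sync_file_get_fence(in_fd);
	int err;

	if (!in_fence)
		return -EINVAL;

	err = i915_gem_request_await_dma_fence(req, in_fence);
	dma_fence_put(in_fence);
	if (err)
		return err;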
@@ -521,40 +606,47 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
struct drm_i915_gem_object *obj,
bool write)
{
struct i915_gem_active *active;
unsigned long active_mask;
int idx;
struct dma_fence *excl;
int ret = 0;
if (write) {
active_mask = i915_gem_object_get_active(obj);
active = obj->last_read;
} else {
active_mask = 1;
active = &obj->last_write;
}
struct dma_fence **shared;
unsigned int count, i;
for_each_active(active_mask, idx) {
struct drm_i915_gem_request *request;
int ret;
request = i915_gem_active_peek(&active[idx],
&obj->base.dev->struct_mutex);
if (!request)
continue;
ret = i915_gem_request_await_request(to, request);
ret = reservation_object_get_fences_rcu(obj->resv,
&excl, &count, &shared);
if (ret)
return ret;
for (i = 0; i < count; i++) {
ret = i915_gem_request_await_dma_fence(to, shared[i]);
if (ret)
break;
dma_fence_put(shared[i]);
}
for (; i < count; i++)
dma_fence_put(shared[i]);
kfree(shared);
} else {
excl = reservation_object_get_excl_rcu(obj->resv);
}
return 0;
if (excl) {
if (ret == 0)
ret = i915_gem_request_await_dma_fence(to, excl);
dma_fence_put(excl);
}
return ret;
}
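(Aside, not part of the patch: the asymmetry above is deliberate. A write must wait for every fence attached to the object's reservation, that is all readers plus the previous writer, whereas a read only needs to wait for the exclusive write fence.)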
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
dev_priv->gt.active_engines |= intel_engine_flag(engine);
if (dev_priv->gt.awake)
return;
@@ -580,11 +672,11 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
struct intel_engine_cs *engine = request->engine;
struct intel_ring *ring = request->ring;
struct intel_timeline *timeline = request->timeline;
struct drm_i915_gem_request *prev;
u32 request_start;
u32 reserved_tail;
int ret;
int err;
lockdep_assert_held(&request->i915->drm.struct_mutex);
trace_i915_gem_request_add(request);
/*
@@ -592,8 +684,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up.
*/
request_start = ring->tail;
reserved_tail = request->reserved_space;
request->reserved_space = 0;
/*
@@ -604,10 +694,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* what.
*/
if (flush_caches) {
ret = engine->emit_flush(request, EMIT_FLUSH);
err = engine->emit_flush(request, EMIT_FLUSH);
/* Not allowed to fail! */
WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
WARN(err, "engine->emit_flush() failed: %d!\n", err);
}
/* Record the position of the start of the breadcrumb so that
@@ -615,20 +705,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* GPU processing the request, we never over-estimate the
* position of the ring's HEAD.
*/
err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
GEM_BUG_ON(err);
request->postfix = ring->tail;
/* Not allowed to fail! */
ret = engine->emit_request(request);
WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);
/* Sanity check that the reserved size was large enough. */
ret = ring->tail - request_start;
if (ret < 0)
ret += ring->size;
WARN_ONCE(ret > reserved_tail,
"Not enough space reserved (%d bytes) "
"for adding the request (%d bytes)\n",
reserved_tail, ret);
ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
/* Seal the request and mark it as pending execution. Note that
* we may inspect this state, without holding any locks, during
@@ -636,18 +716,24 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* see a more recent value in the hws than we are tracking.
*/
prev = i915_gem_active_raw(&engine->last_request,
prev = i915_gem_active_raw(&timeline->last_request,
&request->i915->drm.struct_mutex);
if (prev)
i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
&request->submitq);
request->emitted_jiffies = jiffies;
request->previous_seqno = engine->last_pending_seqno;
engine->last_pending_seqno = request->fence.seqno;
i915_gem_active_set(&engine->last_request, request);
list_add_tail(&request->link, &engine->request_list);
spin_lock_irq(&timeline->lock);
list_add_tail(&request->link, &timeline->requests);
spin_unlock_irq(&timeline->lock);
GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
request->fence.seqno));
timeline->last_submitted_seqno = request->fence.seqno;
i915_gem_active_set(&timeline->last_request, request);
list_add_tail(&request->ring_link, &ring->request_list);
request->emitted_jiffies = jiffies;
i915_gem_mark_busy(engine);
@@ -715,7 +801,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
timeout_us += local_clock_us(&cpu);
do {
if (i915_gem_request_completed(req))
if (__i915_gem_request_completed(req))
return true;
if (signal_pending_state(state, current))
@@ -730,76 +816,101 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
return false;
}
static long
__i915_request_wait_for_submit(struct drm_i915_gem_request *request,
unsigned int flags,
long timeout)
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
DEFINE_WAIT(reset);
DEFINE_WAIT(wait);
if (flags & I915_WAIT_LOCKED)
add_wait_queue(q, &reset);
do {
prepare_to_wait(&request->submit.wait, &wait, state);
if (i915_sw_fence_done(&request->submit))
break;
if (flags & I915_WAIT_LOCKED &&
i915_reset_in_progress(&request->i915->gpu_error)) {
__set_current_state(TASK_RUNNING);
i915_reset(request->i915);
reset_wait_queue(q, &reset);
continue;
}
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
break;
}
timeout = io_schedule_timeout(timeout);
} while (timeout);
finish_wait(&request->submit.wait, &wait);
if (flags & I915_WAIT_LOCKED)
remove_wait_queue(q, &reset);
return timeout;
}
/**
* i915_wait_request - wait until execution of request has finished
* @req: duh!
* @req: the request to wait upon
* @flags: how to wait
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
* @rps: client to charge for RPS boosting
* @timeout: how long to wait in jiffies
*
* Note: It is of utmost importance that the passed in seqno and reset_counter
* values have been read by the caller in an smp safe manner. Where read-side
* locks are involved, it is sufficient to read the reset_counter before
* unlocking the lock that protects the seqno. For lockless tricks, the
* reset_counter _must_ be read before, and an appropriate smp_rmb must be
* inserted.
* i915_wait_request() waits for the request to be completed, for a
* maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
* unbounded wait).
*
* Returns 0 if the request was found within the alloted time. Else returns the
* errno with remaining time filled in timeout argument.
* If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
* in via the flags, and vice versa if the struct_mutex is not held, the caller
* must not specify that the wait is locked.
*
* Returns the remaining time (in jiffies) if the request completed, which may
* be zero or -ETIME if the request is unfinished after the timeout expires.
* May return -EINTR is called with I915_WAIT_INTERRUPTIBLE and a signal is
* pending before the request completes.
*/
int i915_wait_request(struct drm_i915_gem_request *req,
unsigned int flags,
s64 *timeout,
struct intel_rps_client *rps)
long i915_wait_request(struct drm_i915_gem_request *req,
unsigned int flags,
long timeout)
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(reset);
struct intel_wait wait;
unsigned long timeout_remain;
int ret = 0;
might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
GEM_BUG_ON(debug_locks &&
!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
!!(flags & I915_WAIT_LOCKED));
#endif
GEM_BUG_ON(timeout < 0);
if (i915_gem_request_completed(req))
return 0;
return timeout;
timeout_remain = MAX_SCHEDULE_TIMEOUT;
if (timeout) {
if (WARN_ON(*timeout < 0))
return -EINVAL;
if (*timeout == 0)
return -ETIME;
/* Record current time in case interrupted, or wedged */
timeout_remain = nsecs_to_jiffies_timeout(*timeout);
*timeout += ktime_get_raw_ns();
}
if (!timeout)
return -ETIME;
trace_i915_gem_request_wait_begin(req);
/* This client is about to stall waiting for the GPU. In many cases
* this is undesirable and limits the throughput of the system, as
* many clients cannot continue processing user input/output whilst
* blocked. RPS autotuning may take tens of milliseconds to respond
* to the GPU load and thus incurs additional latency for the client.
* We can circumvent that by promoting the GPU frequency to maximum
* before we wait. This makes the GPU throttle up much more quickly
* (good for benchmarks and user experience, e.g. window animations),
* but at a cost of spending more power processing the workload
* (bad for battery). Not all clients even want their results
* immediately and for them we should just let the GPU select its own
* frequency to maximise efficiency. To prevent a single client from
* forcing the clocks too high for the whole system, we only allow
* each client to waitboost once in a busy period.
*/
if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
if (!i915_sw_fence_done(&req->submit)) {
timeout = __i915_request_wait_for_submit(req, flags, timeout);
if (timeout < 0)
goto complete;
GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
}
GEM_BUG_ON(!req->global_seqno);
/* Optimistic short spin before touching IRQs */
if (i915_spin_request(req, state, 5))
@@ -809,7 +920,7 @@ int i915_wait_request(struct drm_i915_gem_request *req,
if (flags & I915_WAIT_LOCKED)
add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
intel_wait_init(&wait, req->fence.seqno);
intel_wait_init(&wait, req->global_seqno);
if (intel_engine_add_wait(req->engine, &wait))
/* In order to check that we haven't missed the interrupt
* as we enabled it, we need to kick ourselves to do a
@@ -819,16 +930,17 @@ int i915_wait_request(struct drm_i915_gem_request *req,
for (;;) {
if (signal_pending_state(state, current)) {
ret = -ERESTARTSYS;
timeout = -ERESTARTSYS;
break;
}
timeout_remain = io_schedule_timeout(timeout_remain);
if (timeout_remain == 0) {
ret = -ETIME;
if (!timeout) {
timeout = -ETIME;
break;
}
timeout = io_schedule_timeout(timeout);
if (intel_wait_complete(&wait))
break;
@@ -875,74 +987,39 @@ wakeup:
complete:
trace_i915_gem_request_wait_end(req);
if (timeout) {
*timeout -= ktime_get_raw_ns();
if (*timeout < 0)
*timeout = 0;
/*
* Apparently ktime isn't accurate enough and occasionally has a
* bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
* things up to make the test happy. We allow up to 1 jiffy.
*
* This is a regrssion from the timespec->ktime conversion.
*/
if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
*timeout = 0;
}
if (IS_RPS_USER(rps) &&
req->fence.seqno == req->engine->last_submitted_seqno) {
/* The GPU is now idle and this client has stalled.
* Since no other client has submitted a request in the
* meantime, assume that this client is the only one
* supplying work to the GPU but is unable to keep that
* work supplied because it is waiting. Since the GPU is
* then never kept fully busy, RPS autoclocking will
* keep the clocks relatively low, causing further delays.
* Compensate by giving the synchronous client credit for
* a waitboost next time.
*/
spin_lock(&req->i915->rps.client_lock);
list_del_init(&rps->link);
spin_unlock(&req->i915->rps.client_lock);
}
return ret;
return timeout;
}
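Aside (not part of the patch): the wait now takes and returns a timeout in jiffies rather than filling in an errno through a pointer, so a caller that only cares about completion follows the pattern used by i915_gem_active_wait() elsewhere in this series, roughly:

	long ret;

	ret = i915_wait_request(req, I915_WAIT_INTERRUPTIBLE,
				MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret; /* interrupted, or -ETIME for a finite wait */
	/* otherwise ret is the time remaining, in jiffies */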
static bool engine_retire_requests(struct intel_engine_cs *engine)
static void engine_retire_requests(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request, *next;
list_for_each_entry_safe(request, next, &engine->request_list, link) {
if (!i915_gem_request_completed(request))
return false;
list_for_each_entry_safe(request, next,
&engine->timeline->requests, link) {
if (!__i915_gem_request_completed(request))
return;
i915_gem_request_retire(request);
}
return true;
}
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
unsigned int tmp;
enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (dev_priv->gt.active_engines == 0)
if (!dev_priv->gt.active_requests)
return;
GEM_BUG_ON(!dev_priv->gt.awake);
for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines, tmp)
if (engine_retire_requests(engine))
dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
for_each_engine(engine, dev_priv, id)
engine_retire_requests(engine);
if (dev_priv->gt.active_engines == 0)
queue_delayed_work(dev_priv->wq,
&dev_priv->gt.idle_work,
msecs_to_jiffies(100));
if (!dev_priv->gt.active_requests)
mod_delayed_work(dev_priv->wq,
&dev_priv->gt.idle_work,
msecs_to_jiffies(100));
}


@@ -81,11 +81,14 @@ struct drm_i915_gem_request {
struct i915_gem_context *ctx;
struct intel_engine_cs *engine;
struct intel_ring *ring;
struct intel_timeline *timeline;
struct intel_signal_node signaling;
struct i915_sw_fence submit;
wait_queue_t submitq;
u32 global_seqno;
/** GEM sequence number associated with the previous request,
* when the HWS breadcrumb is equal to this the GPU is processing
* this request.
@@ -147,7 +150,7 @@ struct drm_i915_gem_request {
extern const struct dma_fence_ops i915_fence_ops;
static inline bool fence_is_i915(struct dma_fence *fence)
static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
return fence->ops == &i915_fence_ops;
}
@@ -162,7 +165,7 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
static inline u32
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
return req ? req->fence.seqno : 0;
return req ? req->global_seqno : 0;
}
static inline struct intel_engine_cs *
@@ -176,7 +179,7 @@ to_request(struct dma_fence *fence)
{
/* We assume that NULL fence/request are interoperable */
BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
GEM_BUG_ON(fence && !fence_is_i915(fence));
GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
return container_of(fence, struct drm_i915_gem_request, fence);
}
@@ -214,6 +217,8 @@ int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
struct drm_i915_gem_object *obj,
bool write);
int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
struct dma_fence *fence);
void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request(req) \
@@ -226,13 +231,13 @@ struct intel_rps_client;
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
int i915_wait_request(struct drm_i915_gem_request *req,
unsigned int flags,
s64 *timeout,
struct intel_rps_client *rps)
long i915_wait_request(struct drm_i915_gem_request *req,
unsigned int flags,
long timeout)
__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
@@ -245,17 +250,37 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
}
static inline bool
i915_gem_request_started(const struct drm_i915_gem_request *req)
__i915_gem_request_started(const struct drm_i915_gem_request *req)
{
GEM_BUG_ON(!req->global_seqno);
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
req->previous_seqno);
}
static inline bool
i915_gem_request_started(const struct drm_i915_gem_request *req)
{
if (!req->global_seqno)
return false;
return __i915_gem_request_started(req);
}
static inline bool
__i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
GEM_BUG_ON(!req->global_seqno);
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
req->global_seqno);
}
static inline bool
i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
req->fence.seqno);
if (!req->global_seqno)
return false;
return __i915_gem_request_completed(req);
}
bool __i915_spin_request(const struct drm_i915_gem_request *request,
@@ -263,7 +288,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *request,
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
int state, unsigned long timeout_us)
{
return (i915_gem_request_started(request) &&
return (__i915_gem_request_started(request) &&
__i915_spin_request(request, state, timeout_us));
}
@@ -551,54 +576,14 @@ i915_gem_active_isset(const struct i915_gem_active *active)
return rcu_access_pointer(active->request);
}
/**
* i915_gem_active_is_idle - report whether the active tracker is idle
* @active - the active tracker
*
* i915_gem_active_is_idle() returns true if the active tracker is currently
* unassigned or if the request is complete (but not yet retired). Requires
* the caller to hold struct_mutex (but that can be relaxed if desired).
*/
static inline bool
i915_gem_active_is_idle(const struct i915_gem_active *active,
struct mutex *mutex)
{
return !i915_gem_active_peek(active, mutex);
}
/**
* i915_gem_active_wait - waits until the request is completed
* @active - the active request on which to wait
*
* i915_gem_active_wait() waits until the request is completed before
* returning. Note that it does not guarantee that the request is
* retired first, see i915_gem_active_retire().
*
* i915_gem_active_wait() returns immediately if the active
* request is already complete.
*/
static inline int __must_check
i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
{
struct drm_i915_gem_request *request;
request = i915_gem_active_peek(active, mutex);
if (!request)
return 0;
return i915_wait_request(request,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
NULL, NULL);
}
/**
* i915_gem_active_wait_unlocked - waits until the request is completed
* @active - the active request on which to wait
* @flags - how to wait
* @timeout - how long to wait at most
* @rps - userspace client to charge for a waitboost
*
* i915_gem_active_wait_unlocked() waits until the request is completed before
* i915_gem_active_wait() waits until the request is completed before
* returning, without requiring any locks to be held. Note that it does not
* retire any requests before returning.
*
@@ -614,21 +599,18 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
* Returns 0 if successful, or a negative error code.
*/
static inline int
i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
unsigned int flags,
s64 *timeout,
struct intel_rps_client *rps)
i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
{
struct drm_i915_gem_request *request;
int ret = 0;
long ret = 0;
request = i915_gem_active_get_unlocked(active);
if (request) {
ret = i915_wait_request(request, flags, timeout, rps);
ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT);
i915_gem_request_put(request);
}
return ret;
return ret < 0 ? ret : 0;
}
/**
@@ -645,7 +627,7 @@ i915_gem_active_retire(struct i915_gem_active *active,
struct mutex *mutex)
{
struct drm_i915_gem_request *request;
int ret;
long ret;
request = i915_gem_active_raw(active, mutex);
if (!request)
@@ -653,8 +635,8 @@ i915_gem_active_retire(struct i915_gem_active *active,
ret = i915_wait_request(request,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
NULL, NULL);
if (ret)
MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;
list_del_init(&active->link);
@@ -665,24 +647,6 @@ i915_gem_active_retire(struct i915_gem_active *active,
return 0;
}
/* Convenience functions for peeking at state inside active's request whilst
* guarded by the struct_mutex.
*/
static inline uint32_t
i915_gem_active_get_seqno(const struct i915_gem_active *active,
struct mutex *mutex)
{
return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
}
static inline struct intel_engine_cs *
i915_gem_active_get_engine(const struct i915_gem_active *active,
struct mutex *mutex)
{
return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
}
#define for_each_active(mask, idx) \
for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))


@@ -48,6 +48,20 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
return false;
*unlock = false;
} else {
*unlock = true;
}
return true;
}
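(Aside, not part of the patch: this helper lets the shrinker run both when it has to take struct_mutex itself and when it is entered from direct reclaim while the current task already holds struct_mutex, in which case it must not drop the lock on the way out; if another task holds the mutex the shrinker simply gives up.)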
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
@@ -66,8 +80,11 @@ static bool swap_available(void)
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
/* Only shmemfs objects are backed by swap */
if (!obj->base.filp)
if (!obj->mm.pages)
return false;
/* Consider only shrinkable ojects. */
if (!i915_gem_object_is_shrinkable(obj))
return false;
/* Only report true if by unbinding the object and putting its pages
@@ -78,7 +95,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* to the GPU, simply unbinding from the GPU is not going to succeed
* in releasing our pin count on the pages themselves.
*/
if (obj->pages_pin_count > obj->bind_count)
if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
return false;
if (any_vma_pinned(obj))
@@ -88,7 +105,14 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap.
*/
return swap_available() || obj->madv == I915_MADV_DONTNEED;
return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
if (i915_gem_object_unbind(obj) == 0)
__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
return !READ_ONCE(obj->mm.pages);
}
/**
@@ -128,6 +152,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
{ NULL, 0 },
}, *phase;
unsigned long count = 0;
bool unlock;
if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
return 0;
trace_i915_gem_shrink(dev_priv, target, flags);
i915_gem_retire_requests(dev_priv);
@@ -171,15 +199,19 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
while (count < target &&
(obj = list_first_entry_or_null(phase->list,
typeof(*obj),
global_list))) {
list_move_tail(&obj->global_list, &still_in_list);
global_link))) {
list_move_tail(&obj->global_link, &still_in_list);
if (!obj->mm.pages) {
list_del_init(&obj->global_link);
continue;
}
if (flags & I915_SHRINK_PURGEABLE &&
obj->madv != I915_MADV_DONTNEED)
obj->mm.madv != I915_MADV_DONTNEED)
continue;
if (flags & I915_SHRINK_VMAPS &&
!is_vmalloc_addr(obj->mapping))
!is_vmalloc_addr(obj->mm.mapping))
continue;
if (!(flags & I915_SHRINK_ACTIVE) &&
@@ -190,22 +222,28 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if (!can_release_pages(obj))
continue;
i915_gem_object_get(obj);
/* For the unbound phase, this should be a no-op! */
i915_gem_object_unbind(obj);
if (i915_gem_object_put_pages(obj) == 0)
count += obj->base.size >> PAGE_SHIFT;
i915_gem_object_put(obj);
if (unsafe_drop_pages(obj)) {
/* May arrive from get_pages on another bo */
mutex_lock_nested(&obj->mm.lock,
I915_MM_SHRINKER);
if (!obj->mm.pages) {
__i915_gem_object_invalidate(obj);
list_del_init(&obj->global_link);
count += obj->base.size >> PAGE_SHIFT;
}
mutex_unlock(&obj->mm.lock);
}
}
list_splice(&still_in_list, phase->list);
list_splice_tail(&still_in_list, phase->list);
}
if (flags & I915_SHRINK_BOUND)
intel_runtime_pm_put(dev_priv);
i915_gem_retire_requests(dev_priv);
if (unlock)
mutex_unlock(&dev_priv->drm.struct_mutex);
/* expedite the RCU grace period to free some request slabs */
synchronize_rcu_expedited();
@@ -239,19 +277,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
return freed;
}
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
return false;
*unlock = false;
} else
*unlock = true;
return true;
}
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
@ -268,11 +293,11 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
i915_gem_retire_requests(dev_priv);
count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
if (can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
}
@ -373,13 +398,19 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
* being pointed to by hardware.
*/
unbound = bound = unevictable = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
if (!obj->mm.pages)
continue;
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
unbound += obj->base.size >> PAGE_SHIFT;
}
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (!obj->mm.pages)
continue;
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else

View file

@ -109,7 +109,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
*
*/
base = 0;
if (INTEL_INFO(dev)->gen >= 3) {
if (INTEL_GEN(dev_priv) >= 3) {
u32 bsm;
pci_read_config_dword(pdev, INTEL_BSM, &bsm);
@ -138,7 +138,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I865_TOUD, &toud);
base = (toud << 16) + tseg_size;
} else if (IS_I85X(dev)) {
} else if (IS_I85X(dev_priv)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
@ -546,25 +546,29 @@ i915_pages_create_for_stolen(struct drm_device *dev,
return st;
}
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
static struct sg_table *
i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
BUG();
return -EINVAL;
return i915_pages_create_for_stolen(obj->base.dev,
obj->stolen->start,
obj->stolen->size);
}
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
/* Should only be called during free */
sg_free_table(obj->pages);
kfree(obj->pages);
sg_free_table(pages);
kfree(pages);
}
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
__i915_gem_object_unpin_pages(obj);
if (obj->stolen) {
i915_gem_stolen_remove_node(dev_priv, obj->stolen);
kfree(obj->stolen);
@ -590,20 +594,13 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->pages = i915_pages_create_for_stolen(dev,
stolen->start, stolen->size);
if (obj->pages == NULL)
goto cleanup;
obj->get_page.sg = obj->pages->sgl;
obj->get_page.last = 0;
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
if (i915_gem_object_pin_pages(obj))
goto cleanup;
return obj;
cleanup:
@ -698,10 +695,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
goto err_pages;
}
/* To simplify the initialisation sequence between KMS and GTT,
@ -715,20 +716,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
goto err;
goto err_pages;
}
vma->pages = obj->pages;
vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
obj->bind_count++;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
i915_gem_object_pin_pages(obj);
return obj;
err_pages:
i915_gem_object_unpin_pages(obj);
err:
i915_gem_object_put(obj);
return NULL;

View file

@ -201,12 +201,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!i915_tiling_ok(dev,
args->stride, obj->base.size, args->tiling_mode)) {
i915_gem_object_put_unlocked(obj);
i915_gem_object_put(obj);
return -EINVAL;
}
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev->struct_mutex);
if (obj->pin_display || obj->framebuffer_references) {
err = -EBUSY;
@ -261,14 +259,22 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!err) {
struct i915_vma *vma;
if (obj->pages &&
obj->madv == I915_MADV_WILLNEED &&
mutex_lock(&obj->mm.lock);
if (obj->mm.pages &&
obj->mm.madv == I915_MADV_WILLNEED &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (args->tiling_mode == I915_TILING_NONE)
i915_gem_object_unpin_pages(obj);
if (!i915_gem_object_is_tiled(obj))
i915_gem_object_pin_pages(obj);
if (args->tiling_mode == I915_TILING_NONE) {
GEM_BUG_ON(!obj->mm.quirked);
__i915_gem_object_unpin_pages(obj);
obj->mm.quirked = false;
}
if (!i915_gem_object_is_tiled(obj)) {
GEM_BUG_ON(!obj->mm.quirked);
__i915_gem_object_pin_pages(obj);
obj->mm.quirked = true;
}
}
mutex_unlock(&obj->mm.lock);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!vma->fence)
@ -302,8 +308,6 @@ err:
i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
intel_runtime_pm_put(dev_priv);
return err;
}
@ -327,12 +331,19 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_get_tiling *args = data;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
int err = -ENOENT;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
rcu_read_lock();
obj = i915_gem_object_lookup_rcu(file, args->handle);
if (obj) {
args->tiling_mode =
READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
err = 0;
}
rcu_read_unlock();
if (unlikely(err))
return err;
args->tiling_mode = READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
switch (args->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
@ -340,11 +351,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
case I915_TILING_Y:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
break;
default:
case I915_TILING_NONE:
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
break;
default:
DRM_ERROR("unknown tiling mode\n");
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
@ -357,6 +367,5 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
i915_gem_object_put_unlocked(obj);
return 0;
}

View file

@ -1,5 +1,5 @@
/*
* Copyright 2016 Intel Corporation
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -17,29 +17,49 @@
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef _I915_GEM_DMABUF_H_
#define _I915_GEM_DMABUF_H_
#include "i915_drv.h"
#include <linux/dma-buf.h>
static inline struct reservation_object *
i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
int i915_gem_timeline_init(struct drm_i915_private *i915,
struct i915_gem_timeline *timeline,
const char *name)
{
struct dma_buf *dma_buf;
unsigned int i;
u64 fences;
if (obj->base.dma_buf)
dma_buf = obj->base.dma_buf;
else if (obj->base.import_attach)
dma_buf = obj->base.import_attach->dmabuf;
else
return NULL;
lockdep_assert_held(&i915->drm.struct_mutex);
return dma_buf->resv;
timeline->i915 = i915;
timeline->name = kstrdup(name ?: "[kernel]", GFP_KERNEL);
if (!timeline->name)
return -ENOMEM;
list_add(&timeline->link, &i915->gt.timelines);
/* Called during early_init before we know how many engines there are */
fences = dma_fence_context_alloc(ARRAY_SIZE(timeline->engine));
for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
struct intel_timeline *tl = &timeline->engine[i];
tl->fence_context = fences++;
tl->common = timeline;
spin_lock_init(&tl->lock);
init_request_active(&tl->last_request, NULL);
INIT_LIST_HEAD(&tl->requests);
}
return 0;
}
#endif
void i915_gem_timeline_fini(struct i915_gem_timeline *tl)
{
lockdep_assert_held(&tl->i915->drm.struct_mutex);
list_del(&tl->link);
kfree(tl->name);
}
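/*
 * Hedged sketch of how the new timeline API is intended to be consumed
 * during GEM init. The gt.global_timeline field and the "[execution]"
 * name are assumptions for illustration; only gt.timelines is taken from
 * the code above.
 */
static int example_timelines_init(struct drm_i915_private *i915)
{
	int err;

	mutex_lock(&i915->drm.struct_mutex);
	INIT_LIST_HEAD(&i915->gt.timelines);
	err = i915_gem_timeline_init(i915, &i915->gt.global_timeline,
				     "[execution]");
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}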

View file

@ -0,0 +1,72 @@
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef I915_GEM_TIMELINE_H
#define I915_GEM_TIMELINE_H
#include <linux/list.h>
#include "i915_gem_request.h"
struct i915_gem_timeline;
struct intel_timeline {
u64 fence_context;
u32 last_submitted_seqno;
spinlock_t lock;
/**
* List of breadcrumbs associated with GPU requests currently
* outstanding.
*/
struct list_head requests;
/* Contains an RCU guarded pointer to the last request. No reference is
* held to the request, users must carefully acquire a reference to
* the request using i915_gem_active_get_request_rcu(), or hold the
* struct_mutex.
*/
struct i915_gem_active last_request;
u32 sync_seqno[I915_NUM_ENGINES];
struct i915_gem_timeline *common;
};
struct i915_gem_timeline {
struct list_head link;
atomic_t next_seqno;
struct drm_i915_private *i915;
const char *name;
struct intel_timeline engine[I915_NUM_ENGINES];
};
int i915_gem_timeline_init(struct drm_i915_private *i915,
struct i915_gem_timeline *tl,
const char *name);
void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
#endif

View file

@ -61,33 +61,26 @@ struct i915_mmu_object {
bool attached;
};
static void wait_rendering(struct drm_i915_gem_object *obj)
{
unsigned long active = __I915_BO_ACTIVE(obj);
int idx;
for_each_active(active, idx)
i915_gem_active_wait_unlocked(&obj->last_read[idx],
0, NULL, NULL);
}
static void cancel_userptr(struct work_struct *work)
{
struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
struct drm_i915_gem_object *obj = mo->obj;
struct drm_device *dev = obj->base.dev;
wait_rendering(obj);
i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
mutex_lock(&dev->struct_mutex);
/* Cancel any active worker and force us to re-evaluate gup */
obj->userptr.work = NULL;
if (obj->pages != NULL) {
/* We are inside a kthread context and can't be interrupted */
WARN_ON(i915_gem_object_unbind(obj));
WARN_ON(i915_gem_object_put_pages(obj));
}
/* We are inside a kthread context and can't be interrupted */
if (i915_gem_object_unbind(obj) == 0)
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
WARN_ONCE(obj->mm.pages,
"Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
obj->bind_count,
atomic_read(&obj->mm.pages_pin_count),
obj->pin_display);
i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
@ -436,24 +429,25 @@ err:
return ret;
}
static int
static struct sg_table *
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
struct page **pvec, int num_pages)
{
struct sg_table *pages;
int ret;
ret = st_set_pages(&obj->pages, pvec, num_pages);
ret = st_set_pages(&pages, pvec, num_pages);
if (ret)
return ret;
return ERR_PTR(ret);
ret = i915_gem_gtt_prepare_object(obj);
ret = i915_gem_gtt_prepare_pages(obj, pages);
if (ret) {
sg_free_table(obj->pages);
kfree(obj->pages);
obj->pages = NULL;
sg_free_table(pages);
kfree(pages);
return ERR_PTR(ret);
}
return ret;
return pages;
}
static int
@ -497,7 +491,6 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
struct get_pages_work *work = container_of(_work, typeof(*work), work);
struct drm_i915_gem_object *obj = work->obj;
struct drm_device *dev = obj->base.dev;
const int npages = obj->base.size >> PAGE_SHIFT;
struct page **pvec;
int pinned, ret;
@ -533,33 +526,32 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
}
mutex_lock(&dev->struct_mutex);
mutex_lock(&obj->mm.lock);
if (obj->userptr.work == &work->work) {
struct sg_table *pages = ERR_PTR(ret);
if (pinned == npages) {
ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
if (ret == 0) {
list_add_tail(&obj->global_list,
&to_i915(dev)->mm.unbound_list);
obj->get_page.sg = obj->pages->sgl;
obj->get_page.last = 0;
pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
if (!IS_ERR(pages)) {
__i915_gem_object_set_pages(obj, pages);
pinned = 0;
pages = NULL;
}
}
obj->userptr.work = ERR_PTR(ret);
}
obj->userptr.workers--;
i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
obj->userptr.work = ERR_CAST(pages);
}
mutex_unlock(&obj->mm.lock);
release_pages(pvec, pinned, 0);
drm_free_large(pvec);
i915_gem_object_put(obj);
put_task_struct(work->task);
kfree(work);
}
static int
static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
bool *active)
{
@ -584,15 +576,11 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
* that error back to this function through
* obj->userptr.work = ERR_PTR.
*/
if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
return -EAGAIN;
work = kmalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
return ERR_PTR(-ENOMEM);
obj->userptr.work = &work->work;
obj->userptr.workers++;
work->obj = i915_gem_object_get(obj);
@ -603,14 +591,15 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
schedule_work(&work->work);
*active = true;
return -EAGAIN;
return ERR_PTR(-EAGAIN);
}
static int
static struct sg_table *
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
const int num_pages = obj->base.size >> PAGE_SHIFT;
struct page **pvec;
struct sg_table *pages;
int pinned, ret;
bool active;
@ -634,15 +623,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
if (obj->userptr.work) {
/* active flag should still be held for the pending work */
if (IS_ERR(obj->userptr.work))
return PTR_ERR(obj->userptr.work);
return ERR_CAST(obj->userptr.work);
else
return -EAGAIN;
return ERR_PTR(-EAGAIN);
}
/* Let the mmu-notifier know that we have begun and need cancellation */
ret = __i915_gem_userptr_set_active(obj, true);
if (ret)
return ret;
return ERR_PTR(ret);
pvec = NULL;
pinned = 0;
@ -651,7 +640,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
GFP_TEMPORARY);
if (pvec == NULL) {
__i915_gem_userptr_set_active(obj, false);
return -ENOMEM;
return ERR_PTR(-ENOMEM);
}
pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
@ -660,21 +649,22 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
active = false;
if (pinned < 0)
ret = pinned, pinned = 0;
pages = ERR_PTR(pinned), pinned = 0;
else if (pinned < num_pages)
ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
pages = __i915_gem_userptr_get_pages_schedule(obj, &active);
else
ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
if (ret) {
pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
if (IS_ERR(pages)) {
__i915_gem_userptr_set_active(obj, active);
release_pages(pvec, pinned, 0);
}
drm_free_large(pvec);
return ret;
return pages;
}
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
@ -682,22 +672,22 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
BUG_ON(obj->userptr.work != NULL);
__i915_gem_userptr_set_active(obj, false);
if (obj->madv != I915_MADV_WILLNEED)
obj->dirty = 0;
if (obj->mm.madv != I915_MADV_WILLNEED)
obj->mm.dirty = false;
i915_gem_gtt_finish_object(obj);
i915_gem_gtt_finish_pages(obj, pages);
for_each_sgt_page(page, sgt_iter, obj->pages) {
if (obj->dirty)
for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty)
set_page_dirty(page);
mark_page_accessed(page);
put_page(page);
}
obj->dirty = 0;
obj->mm.dirty = false;
sg_free_table(obj->pages);
kfree(obj->pages);
sg_free_table(pages);
kfree(pages);
}
static void
@ -717,7 +707,8 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
.dmabuf_export = i915_gem_userptr_dmabuf_export,
@ -816,7 +807,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
i915_gem_object_put_unlocked(obj);
i915_gem_object_put(obj);
if (ret)
return ret;

View file

@ -415,17 +415,13 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
if (INTEL_GEN(m->i915) >= 6) {
err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
ee->semaphore_mboxes[0],
ee->semaphore_seqno[0]);
err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
ee->semaphore_mboxes[1],
ee->semaphore_seqno[1]);
if (HAS_VEBOX(m->i915)) {
err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
ee->semaphore_mboxes[2],
ee->semaphore_seqno[2]);
}
err_printf(m, " SYNC_0: 0x%08x\n",
ee->semaphore_mboxes[0]);
err_printf(m, " SYNC_1: 0x%08x\n",
ee->semaphore_mboxes[1]);
if (HAS_VEBOX(m->i915))
err_printf(m, " SYNC_2: 0x%08x\n",
ee->semaphore_mboxes[2]);
}
if (USES_PPGTT(m->i915)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
@ -546,9 +542,13 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
err_printf(m, "%s\n", error->error_msg);
err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
err_printf(m, "Time: %ld s %ld us\n",
error->time.tv_sec, error->time.tv_usec);
err_printf(m, "Boottime: %ld s %ld us\n",
error->boottime.tv_sec, error->boottime.tv_usec);
err_printf(m, "Uptime: %ld s %ld us\n",
error->uptime.tv_sec, error->uptime.tv_usec);
err_print_capabilities(m, &error->device_info);
max_hangcheck_score = 0;
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@ -702,6 +702,8 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
print_error_obj(m, NULL, "Semaphores", error->semaphore);
print_error_obj(m, NULL, "GuC log buffer", error->guc_log);
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
@ -782,6 +784,7 @@ static void i915_error_state_free(struct kref *error_ref)
}
i915_error_object_free(error->semaphore);
i915_error_object_free(error->guc_log);
for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
kfree(error->active_bo[i]);
@ -880,17 +883,17 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->name = obj->base.name;
for (i = 0; i < I915_NUM_ENGINES; i++)
err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
err->wseqno = __active_get_seqno(&obj->last_write);
err->engine = __active_get_engine_id(&obj->last_write);
err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
err->wseqno = __active_get_seqno(&vma->last_write);
err->engine = __active_get_engine_id(&vma->last_write);
err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
err->fence_reg = vma->fence ? vma->fence->id : -1;
err->tiling = i915_gem_object_get_tiling(obj);
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->dirty = obj->mm.dirty;
err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
err->cache_level = obj->cache_level;
}
@ -965,6 +968,26 @@ static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
}
}
static inline u32
gen8_engine_sync_index(struct intel_engine_cs *engine,
struct intel_engine_cs *other)
{
int idx;
/*
* rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
* vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
* bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
* vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
* vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
*/
idx = (other - engine) - 1;
if (idx < 0)
idx += I915_NUM_ENGINES;
return idx;
}
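/*
 * Worked example for the mapping above (assuming the engine array order
 * rcs, vcs, bcs, vecs, vcs2 and I915_NUM_ENGINES == 5): from vcs's point
 * of view, rcs gives idx = (0 - 1) - 1 = -2, which wraps to -2 + 5 = 3,
 * matching the "vcs -> ... 3 = rcs" line in the comment.
 */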
static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
struct intel_engine_cs *engine,
@ -988,10 +1011,9 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
signal_offset =
(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
tmp = error->semaphore->pages[0];
idx = intel_engine_sync_index(engine, to);
idx = gen8_engine_sync_index(engine, to);
ee->semaphore_mboxes[idx] = tmp[signal_offset];
ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
}
}
@ -1002,14 +1024,9 @@ static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
ee->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
ee->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
if (HAS_VEBOX(dev_priv)) {
if (HAS_VEBOX(dev_priv))
ee->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(engine->mmio_base));
ee->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
}
}
static void error_record_engine_waiters(struct intel_engine_cs *engine,
@ -1026,7 +1043,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (RB_EMPTY_ROOT(&b->waiters))
return;
if (!spin_trylock(&b->lock)) {
if (!spin_trylock_irq(&b->lock)) {
ee->waiters = ERR_PTR(-EDEADLK);
return;
}
@ -1034,7 +1051,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
count = 0;
for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
count++;
spin_unlock(&b->lock);
spin_unlock_irq(&b->lock);
waiter = NULL;
if (count)
@ -1044,7 +1061,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (!waiter)
return;
if (!spin_trylock(&b->lock)) {
if (!spin_trylock_irq(&b->lock)) {
kfree(waiter);
ee->waiters = ERR_PTR(-EDEADLK);
return;
@ -1062,7 +1079,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (++ee->num_waiters == count)
break;
}
spin_unlock(&b->lock);
spin_unlock_irq(&b->lock);
}
static void error_record_engine_registers(struct drm_i915_error_state *error,
@ -1103,7 +1120,7 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
ee->acthd = intel_engine_get_active_head(engine);
ee->seqno = intel_engine_get_seqno(engine);
ee->last_seqno = engine->last_submitted_seqno;
ee->last_seqno = intel_engine_last_submit(engine);
ee->start = I915_READ_START(engine);
ee->head = I915_READ_HEAD(engine);
ee->tail = I915_READ_TAIL(engine);
@ -1169,7 +1186,7 @@ static void record_request(struct drm_i915_gem_request *request,
struct drm_i915_error_request *erq)
{
erq->context = request->ctx->hw_id;
erq->seqno = request->fence.seqno;
erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
erq->head = request->head;
erq->tail = request->tail;
@ -1188,7 +1205,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
list_for_each_entry_from(request, &engine->request_list, link)
list_for_each_entry_from(request, &engine->timeline->requests, link)
count++;
if (!count)
return;
@ -1201,7 +1218,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
list_for_each_entry_from(request, &engine->request_list, link) {
list_for_each_entry_from(request, &engine->timeline->requests, link) {
if (count >= ee->num_requests) {
/*
* If the ring request list was changed in
@ -1408,6 +1425,17 @@ static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
error->pinned_bo = bo;
}
static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
/* Capturing log buf contents won't be useful if logging was disabled */
if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0))
return;
error->guc_log = i915_error_object_create(dev_priv,
dev_priv->guc.log.vma);
}
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
@ -1532,8 +1560,13 @@ static int capture(void *data)
i915_gem_record_rings(error->i915, error);
i915_capture_active_buffers(error->i915, error);
i915_capture_pinned_buffers(error->i915, error);
i915_gem_capture_guc_log_buffer(error->i915, error);
do_gettimeofday(&error->time);
error->boottime = ktime_to_timeval(ktime_get_boottime());
error->uptime =
ktime_to_timeval(ktime_sub(ktime_get(),
error->i915->gt.last_init_time));
error->overlay = intel_overlay_capture_error_state(error->i915);
error->display = intel_display_capture_error_state(error->i915);

View file

@ -23,6 +23,8 @@
*/
#include <linux/firmware.h>
#include <linux/circ_buf.h>
#include <linux/debugfs.h>
#include <linux/relay.h>
#include "i915_drv.h"
#include "intel_guc.h"
@ -85,6 +87,7 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
if (WARN_ON(len < 1 || len > 15))
return -EINVAL;
mutex_lock(&guc->action_lock);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
dev_priv->guc.action_count += 1;
@ -123,6 +126,7 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
dev_priv->guc.action_status = status;
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&guc->action_lock);
return ret;
}
@ -170,6 +174,35 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
return host2guc_action(guc, data, ARRAY_SIZE(data));
}
static int host2guc_logbuffer_flush_complete(struct intel_guc *guc)
{
u32 data[1];
data[0] = HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE;
return host2guc_action(guc, data, 1);
}
static int host2guc_force_logbuffer_flush(struct intel_guc *guc)
{
u32 data[2];
data[0] = HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH;
data[1] = 0;
return host2guc_action(guc, data, 2);
}
static int host2guc_logging_control(struct intel_guc *guc, u32 control_val)
{
u32 data[2];
data[0] = HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING;
data[1] = control_val;
return host2guc_action(guc, data, 2);
}
/*
* Initialise, update, or clear doorbell data shared with the GuC
*
@ -187,7 +220,7 @@ static int guc_update_doorbell_id(struct intel_guc *guc,
struct guc_context_desc desc;
size_t len;
doorbell = client->client_base + client->doorbell_offset;
doorbell = client->vaddr + client->doorbell_offset;
if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
test_bit(client->doorbell_id, doorbell_bitmap)) {
@ -293,7 +326,7 @@ static void guc_proc_desc_init(struct intel_guc *guc,
{
struct guc_process_desc *desc;
desc = client->client_base + client->proc_desc_offset;
desc = client->vaddr + client->proc_desc_offset;
memset(desc, 0, sizeof(*desc));
@ -380,8 +413,8 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
gfx_addr = i915_ggtt_offset(client->vma);
desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
client->doorbell_offset;
desc.db_trigger_cpu = (uintptr_t)client->client_base +
client->doorbell_offset;
desc.db_trigger_cpu =
(uintptr_t)client->vaddr + client->doorbell_offset;
desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
desc.process_desc = gfx_addr + client->proc_desc_offset;
desc.wq_addr = gfx_addr + client->wq_offset;
@ -432,7 +465,7 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
{
const size_t wqi_size = sizeof(struct guc_wq_item);
struct i915_guc_client *gc = request->i915->guc.execbuf_client;
struct guc_process_desc *desc = gc->client_base + gc->proc_desc_offset;
struct guc_process_desc *desc = gc->vaddr + gc->proc_desc_offset;
u32 freespace;
int ret;
@ -473,10 +506,9 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
struct intel_engine_cs *engine = rq->engine;
struct guc_process_desc *desc;
struct guc_wq_item *wqi;
void *base;
u32 freespace, tail, wq_off, wq_page;
u32 freespace, tail, wq_off;
desc = gc->client_base + gc->proc_desc_offset;
desc = gc->vaddr + gc->proc_desc_offset;
/* Free space is guaranteed, see i915_guc_wq_reserve() above */
freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
@ -506,10 +538,7 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
gc->wq_rsvd -= wqi_size;
/* WQ starts from the page after doorbell / process_desc */
wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
wq_off &= PAGE_SIZE - 1;
base = kmap_atomic(i915_gem_object_get_page(gc->vma->obj, wq_page));
wqi = (struct guc_wq_item *)((char *)base + wq_off);
wqi = gc->vaddr + wq_off + GUC_DB_SIZE;
/* Now fill in the 4-word work queue item */
wqi->header = WQ_TYPE_INORDER |
@ -521,9 +550,7 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
wqi->fence_id = rq->fence.seqno;
kunmap_atomic(base);
wqi->fence_id = rq->global_seqno;
}
static int guc_ring_doorbell(struct i915_guc_client *gc)
@ -533,7 +560,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
union guc_doorbell_qw *db;
int attempt = 2, ret = -EAGAIN;
desc = gc->client_base + gc->proc_desc_offset;
desc = gc->vaddr + gc->proc_desc_offset;
/* Update the tail so it is visible to GuC */
desc->tail = gc->wq_tail;
@ -549,7 +576,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
db_exc.cookie = 1;
/* pointer of current doorbell cacheline */
db = gc->client_base + gc->doorbell_offset;
db = gc->vaddr + gc->doorbell_offset;
while (attempt--) {
/* lets ring the doorbell */
@ -601,6 +628,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
*/
static void i915_guc_submit(struct drm_i915_gem_request *rq)
{
struct drm_i915_private *dev_priv = rq->i915;
unsigned int engine_id = rq->engine->id;
struct intel_guc *guc = &rq->i915->guc;
struct i915_guc_client *client = guc->execbuf_client;
@ -608,6 +636,11 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
spin_lock(&client->wq_lock);
guc_wq_item_append(client, rq);
/* WA to flush out the pending GMADR writes to ring buffer. */
if (i915_vma_is_map_and_fenceable(rq->ring->vma))
POSTING_READ_FW(GUC_STATUS);
b_ret = guc_ring_doorbell(client);
client->submissions[engine_id] += 1;
@ -616,7 +649,7 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
client->b_fail += 1;
guc->submissions[engine_id] += 1;
guc->last_seqno[engine_id] = rq->fence.seqno;
guc->last_seqno[engine_id] = rq->global_seqno;
spin_unlock(&client->wq_lock);
}
@ -685,14 +718,14 @@ guc_client_free(struct drm_i915_private *dev_priv,
* Be sure to drop any locks
*/
if (client->client_base) {
if (client->vaddr) {
/*
* If we got as far as setting up a doorbell, make sure we
* shut it down before unmapping & deallocating the memory.
*/
guc_disable_doorbell(guc, client);
kunmap(kmap_to_page(client->client_base));
i915_gem_object_unpin_map(client->vma->obj);
}
i915_vma_unpin_and_release(&client->vma);
@ -781,6 +814,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
struct i915_guc_client *client;
struct intel_guc *guc = &dev_priv->guc;
struct i915_vma *vma;
void *vaddr;
uint16_t db_id;
client = kzalloc(sizeof(*client), GFP_KERNEL);
@ -807,7 +841,12 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
client->vma = vma;
client->client_base = kmap(i915_vma_first_page(vma));
vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
if (IS_ERR(vaddr))
goto err;
client->vaddr = vaddr;
spin_lock_init(&client->wq_lock);
client->wq_offset = GUC_DB_SIZE;
@ -847,15 +886,411 @@ err:
return NULL;
}
/*
* Sub buffer switch callback. Called whenever relay has to switch to a new
* sub buffer, relay stays on the same sub buffer if 0 is returned.
*/
static int subbuf_start_callback(struct rchan_buf *buf,
void *subbuf,
void *prev_subbuf,
size_t prev_padding)
{
/* Use no-overwrite mode by default, where relay will stop accepting
* new data if there are no empty sub buffers left.
* There is no strict synchronization enforced by relay between the consumer
* and the producer. In overwrite mode, there is a possibility of getting
* inconsistent/garbled data, since the producer could be writing to the
* same sub buffer from which the consumer is reading. This can't be avoided
* unless the consumer is fast enough and can always run in tandem with
* the producer.
*/
if (relay_buf_full(buf))
return 0;
return 1;
}
/*
* file_create() callback. Creates relay file in debugfs.
*/
static struct dentry *create_buf_file_callback(const char *filename,
struct dentry *parent,
umode_t mode,
struct rchan_buf *buf,
int *is_global)
{
struct dentry *buf_file;
/* This is to enable the use of a single buffer for the relay channel and
* correspondingly have a single file exposed to userspace, through which
* it can collect the logs in order without any post-processing.
* Need to set 'is_global' even if parent is NULL for early logging.
*/
*is_global = 1;
if (!parent)
return NULL;
/* Not using the channel filename passed as an argument, since for each
* channel relay appends the corresponding CPU number to the filename
* passed in relay_open(). This should be fine as relay just needs a
* dentry of the file associated with the channel buffer and that file's
* name need not be the same as the filename passed as an argument.
*/
buf_file = debugfs_create_file("guc_log", mode,
parent, buf, &relay_file_operations);
return buf_file;
}
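/* With debugfs mounted in the default location, the stream produced by
 * this channel then shows up for userspace as
 * /sys/kernel/debug/dri/0/guc_log (the directory noted in
 * guc_log_create_relay_file() below), readable with plain read()/cat.
 */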
/*
* file_remove() default callback. Removes relay file in debugfs.
*/
static int remove_buf_file_callback(struct dentry *dentry)
{
debugfs_remove(dentry);
return 0;
}
/* relay channel callbacks */
static struct rchan_callbacks relay_callbacks = {
.subbuf_start = subbuf_start_callback,
.create_buf_file = create_buf_file_callback,
.remove_buf_file = remove_buf_file_callback,
};
static void guc_log_remove_relay_file(struct intel_guc *guc)
{
relay_close(guc->log.relay_chan);
}
static int guc_log_create_relay_channel(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
/* Keep the size of sub buffers same as shared log buffer */
subbuf_size = guc->log.vma->obj->base.size;
/* Store up to 8 snapshots, which is large enough to buffer sufficient
* boot time logs and gives userspace enough leeway, in terms of
* latency, to consume the logs from relay. It also doesn't take
* up too much memory.
*/
n_subbufs = 8;
guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
n_subbufs, &relay_callbacks, dev_priv);
if (!guc_log_relay_chan) {
DRM_ERROR("Couldn't create relay chan for GuC logging\n");
return -ENOMEM;
}
GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
guc->log.relay_chan = guc_log_relay_chan;
return 0;
}
static int guc_log_create_relay_file(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct dentry *log_dir;
int ret;
/* For now create the log file in /sys/kernel/debug/dri/0 dir */
log_dir = dev_priv->drm.primary->debugfs_root;
/* If the /sys/kernel/debug/dri/0 location does not exist, then debugfs is
* not mounted and so we can't create the relay file.
* The relay API seems to fit well with debugfs only; to make use of relay
* there are 3 requirements which can be met for a debugfs file only in a
* straightforward/clean manner :-
* i) Need the associated dentry pointer of the file, while opening the
* relay channel.
* ii) Should be able to use 'relay_file_operations' fops for the file.
* iii) Set the 'i_private' field of file's inode to the pointer of
* relay channel buffer.
*/
if (!log_dir) {
DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
return -ENODEV;
}
ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
if (ret) {
DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
return ret;
}
return 0;
}
static void guc_move_to_next_buf(struct intel_guc *guc)
{
/* Make sure the updates made in the sub buffer are visible when
* Consumer sees the following update to offset inside the sub buffer.
*/
smp_wmb();
/* All data has been written, so now move the offset of sub buffer. */
relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);
/* Switch to the next sub buffer */
relay_flush(guc->log.relay_chan);
}
static void *guc_get_write_buffer(struct intel_guc *guc)
{
if (!guc->log.relay_chan)
return NULL;
/* Just get the base address of a new sub buffer and copy data into it
* ourselves. NULL will be returned in no-overwrite mode, if all sub
* buffers are full. Could have used the relay_write() to indirectly
* copy the data, but that would have been a bit convoluted, as we need to
* write to only certain locations inside a sub buffer, which cannot be
* done without using relay_reserve() along with relay_write(). So it's
* better to use relay_reserve() alone.
*/
return relay_reserve(guc->log.relay_chan, 0);
}
static bool
guc_check_log_buf_overflow(struct intel_guc *guc,
enum guc_log_buffer_type type, unsigned int full_cnt)
{
unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
bool overflow = false;
if (full_cnt != prev_full_cnt) {
overflow = true;
guc->log.prev_overflow_count[type] = full_cnt;
guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;
if (full_cnt < prev_full_cnt) {
/* buffer_full_cnt is a 4 bit counter */
guc->log.total_overflow_count[type] += 16;
}
DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
}
return overflow;
}
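/*
 * Worked example for the wrap handling above: buffer_full_cnt is a 4 bit
 * counter, so if the previous snapshot read 15 and the current one reads
 * 1, the raw delta is 1 - 15 = -14 and the +16 correction yields the two
 * overflows that actually happened (15 -> 0 -> 1).
 */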
static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
switch (type) {
case GUC_ISR_LOG_BUFFER:
return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
case GUC_DPC_LOG_BUFFER:
return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
case GUC_CRASH_DUMP_LOG_BUFFER:
return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
default:
MISSING_CASE(type);
}
return 0;
}
static void guc_read_update_log_buffer(struct intel_guc *guc)
{
unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
struct guc_log_buffer_state log_buf_state_local;
enum guc_log_buffer_type type;
void *src_data, *dst_data;
bool new_overflow;
if (WARN_ON(!guc->log.buf_addr))
return;
/* Get the pointer to shared GuC log buffer */
log_buf_state = src_data = guc->log.buf_addr;
/* Get the pointer to local buffer to store the logs */
log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
/* Actual logs are present from the 2nd page */
src_data += PAGE_SIZE;
dst_data += PAGE_SIZE;
for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
/* Make a copy of the state structure, which lives inside the GuC log
* buffer (an uncached mapping), on the stack to avoid reading
* from it multiple times.
*/
memcpy(&log_buf_state_local, log_buf_state,
sizeof(struct guc_log_buffer_state));
buffer_size = guc_get_log_buffer_size(type);
read_offset = log_buf_state_local.read_ptr;
write_offset = log_buf_state_local.sampled_write_ptr;
full_cnt = log_buf_state_local.buffer_full_cnt;
/* Bookkeeping stuff */
guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);
/* Update the state of shared log buffer */
log_buf_state->read_ptr = write_offset;
log_buf_state->flush_to_file = 0;
log_buf_state++;
if (unlikely(!log_buf_snapshot_state))
continue;
/* First copy the state structure in snapshot buffer */
memcpy(log_buf_snapshot_state, &log_buf_state_local,
sizeof(struct guc_log_buffer_state));
/* The write pointer could have been updated by the GuC firmware,
* after sending the flush interrupt to the host; for consistency,
* set the write pointer in the snapshot buffer to the same value
* as sampled_write_ptr.
*/
log_buf_snapshot_state->write_ptr = write_offset;
log_buf_snapshot_state++;
/* Now copy the actual logs. */
if (unlikely(new_overflow)) {
/* copy the whole buffer in case of overflow */
read_offset = 0;
write_offset = buffer_size;
} else if (unlikely((read_offset > buffer_size) ||
(write_offset > buffer_size))) {
DRM_ERROR("invalid log buffer state\n");
/* copy whole buffer as offsets are unreliable */
read_offset = 0;
write_offset = buffer_size;
}
/* Just copy the newly written data */
if (read_offset > write_offset) {
i915_memcpy_from_wc(dst_data, src_data, write_offset);
bytes_to_copy = buffer_size - read_offset;
} else {
bytes_to_copy = write_offset - read_offset;
}
i915_memcpy_from_wc(dst_data + read_offset,
src_data + read_offset, bytes_to_copy);
src_data += buffer_size;
dst_data += buffer_size;
}
if (log_buf_snapshot_state)
guc_move_to_next_buf(guc);
else {
/* Use a rate-limited error to avoid a deluge of messages; logs might be
* getting consumed by userspace at a slow rate.
*/
DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
guc->log.capture_miss_count++;
}
}
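/*
 * Worked example for the copy above, with buffer_size 0x8000 (an
 * arbitrary illustrative value): if read_offset is 0x6000 and
 * write_offset is 0x1000 (the writer has wrapped), the first
 * i915_memcpy_from_wc() copies [0, 0x1000) and the second copies
 * [0x6000, 0x8000), i.e. exactly the newly written bytes.
 */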
static void guc_capture_logs_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, guc.log.flush_work);
i915_guc_capture_logs(dev_priv);
}
static void guc_log_cleanup(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* First disable the flush interrupt */
gen9_disable_guc_interrupts(dev_priv);
if (guc->log.flush_wq)
destroy_workqueue(guc->log.flush_wq);
guc->log.flush_wq = NULL;
if (guc->log.relay_chan)
guc_log_remove_relay_file(guc);
guc->log.relay_chan = NULL;
if (guc->log.buf_addr)
i915_gem_object_unpin_map(guc->log.vma->obj);
guc->log.buf_addr = NULL;
}
static int guc_log_create_extras(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
void *vaddr;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* Nothing to do */
if (i915.guc_log_level < 0)
return 0;
if (!guc->log.buf_addr) {
/* Create a WC (Uncached for read) vmalloc mapping of log
* buffer pages, so that we can directly get the data
* (up-to-date) from memory.
*/
vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
return ret;
}
guc->log.buf_addr = vaddr;
}
if (!guc->log.relay_chan) {
/* Create a relay channel, so that we have buffers for storing
* the GuC firmware logs, the channel will be linked with a file
* later on when debugfs is registered.
*/
ret = guc_log_create_relay_channel(guc);
if (ret)
return ret;
}
if (!guc->log.flush_wq) {
INIT_WORK(&guc->log.flush_work, guc_capture_logs_work);
/*
* The GuC log buffer flush work item has to do register access to
* send the ack to GuC, and this work item, if not synced before
* suspend, can potentially get executed after the GFX device is
* suspended.
* By marking the WQ as freezable, we don't have to bother about
* flushing this work item from the suspend hooks; the pending
* work item, if any, will either be executed before the suspend
* or scheduled later on resume. This way the handling of the work
* item can be kept the same between system suspend & rpm suspend.
*/
guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
WQ_HIGHPRI | WQ_FREEZABLE);
if (guc->log.flush_wq == NULL) {
DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
return -ENOMEM;
}
}
return 0;
}
static void guc_log_create(struct intel_guc *guc)
{
struct i915_vma *vma;
unsigned long offset;
uint32_t size, flags;
if (i915.guc_log_level < GUC_LOG_VERBOSITY_MIN)
return;
if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
@ -865,8 +1300,18 @@ static void guc_log_create(struct intel_guc *guc)
GUC_LOG_ISR_PAGES + 1 +
GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
vma = guc->log_vma;
vma = guc->log.vma;
if (!vma) {
/* We require SSE 4.1 for fast reads from the GuC log buffer and
* it should be present on the chipsets supporting GuC based
* submissions.
*/
if (WARN_ON(!i915_memcpy_from_wc(NULL, NULL, 0))) {
/* logging will not be enabled */
i915.guc_log_level = -1;
return;
}
vma = guc_allocate_vma(guc, size);
if (IS_ERR(vma)) {
/* logging will be off */
@ -874,7 +1319,14 @@ static void guc_log_create(struct intel_guc *guc)
return;
}
guc->log_vma = vma;
guc->log.vma = vma;
if (guc_log_create_extras(guc)) {
guc_log_cleanup(guc);
i915_vma_unpin_and_release(&guc->log.vma);
i915.guc_log_level = -1;
return;
}
}
/* each allocated unit is a page */
@ -884,7 +1336,37 @@ static void guc_log_create(struct intel_guc *guc)
(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
}
static int guc_log_late_setup(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (i915.guc_log_level < 0)
return -EINVAL;
/* If log_level was set as -1 at boot time, then the setup needed to
* handle log buffer flush interrupts would not have been done yet,
* so do that now.
*/
ret = guc_log_create_extras(guc);
if (ret)
goto err;
ret = guc_log_create_relay_file(guc);
if (ret)
goto err;
return 0;
err:
guc_log_cleanup(guc);
/* logging will remain off */
i915.guc_log_level = -1;
return ret;
}
static void guc_policies_init(struct guc_policies *policies)
@ -1006,6 +1488,7 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
guc->ctx_pool_vma = vma;
ida_init(&guc->ctx_ids);
mutex_init(&guc->action_lock);
guc_log_create(guc);
guc_addon_create(guc);
@ -1039,7 +1522,8 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
engine->submit_request = i915_guc_submit;
/* Replay the current set of previously submitted requests */
list_for_each_entry(request, &engine->request_list, link) {
list_for_each_entry(request,
&engine->timeline->requests, link) {
client->wq_rsvd += sizeof(struct guc_wq_item);
if (i915_sw_fence_done(&request->submit))
i915_guc_submit(request);
@ -1068,7 +1552,7 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
struct intel_guc *guc = &dev_priv->guc;
i915_vma_unpin_and_release(&guc->ads_vma);
i915_vma_unpin_and_release(&guc->log_vma);
i915_vma_unpin_and_release(&guc->log.vma);
if (guc->ctx_pool_vma)
ida_destroy(&guc->ctx_ids);
@ -1089,6 +1573,8 @@ int intel_guc_suspend(struct drm_device *dev)
if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
return 0;
gen9_disable_guc_interrupts(dev_priv);
ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
@ -1115,6 +1601,9 @@ int intel_guc_resume(struct drm_device *dev)
if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
return 0;
if (i915.guc_log_level >= 0)
gen9_enable_guc_interrupts(dev_priv);
ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
@ -1124,3 +1613,104 @@ int intel_guc_resume(struct drm_device *dev)
return host2guc_action(guc, data, ARRAY_SIZE(data));
}
void i915_guc_capture_logs(struct drm_i915_private *dev_priv)
{
guc_read_update_log_buffer(&dev_priv->guc);
/* Generally the device is expected to be active only at this
* time, so get/put should be really quick.
*/
intel_runtime_pm_get(dev_priv);
host2guc_logbuffer_flush_complete(&dev_priv->guc);
intel_runtime_pm_put(dev_priv);
}
void i915_guc_flush_logs(struct drm_i915_private *dev_priv)
{
if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
return;
/* First disable the interrupts; they will be re-enabled afterwards */
gen9_disable_guc_interrupts(dev_priv);
/* Before initiating the forceful flush, wait for any pending/ongoing
* flush to complete otherwise forceful flush may not actually happen.
*/
flush_work(&dev_priv->guc.log.flush_work);
/* Ask GuC to update the log buffer state */
host2guc_force_logbuffer_flush(&dev_priv->guc);
/* GuC would have updated log buffer by now, so capture it */
i915_guc_capture_logs(dev_priv);
}
void i915_guc_unregister(struct drm_i915_private *dev_priv)
{
if (!i915.enable_guc_submission)
return;
mutex_lock(&dev_priv->drm.struct_mutex);
guc_log_cleanup(&dev_priv->guc);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
void i915_guc_register(struct drm_i915_private *dev_priv)
{
if (!i915.enable_guc_submission)
return;
mutex_lock(&dev_priv->drm.struct_mutex);
guc_log_late_setup(&dev_priv->guc);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
{
union guc_log_control log_param;
int ret;
log_param.value = control_val;
if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
return -EINVAL;
/* This combination doesn't make sense & won't have any effect */
if (!log_param.logging_enabled && (i915.guc_log_level < 0))
return 0;
ret = host2guc_logging_control(&dev_priv->guc, log_param.value);
if (ret < 0) {
DRM_DEBUG_DRIVER("host2guc action failed %d\n", ret);
return ret;
}
i915.guc_log_level = log_param.verbosity;
/* If log_level was set as -1 at boot time, then the relay channel file
* wouldn't have been created by now and interrupts also would not have
* been enabled.
*/
if (!dev_priv->guc.log.relay_chan) {
ret = guc_log_late_setup(&dev_priv->guc);
if (!ret)
gen9_enable_guc_interrupts(dev_priv);
} else if (!log_param.logging_enabled) {
/* Once logging is disabled, GuC won't generate logs & send an
* interrupt. But there could be some data in the log buffer
* which is yet to be captured. So request GuC to update the log
* buffer state and then collect the left over logs.
*/
i915_guc_flush_logs(dev_priv);
/* As logging is disabled, update log level to reflect that */
i915.guc_log_level = -1;
} else {
/* In case interrupts were disabled, enable them now */
gen9_enable_guc_interrupts(dev_priv);
}
return ret;
}
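/*
 * Hedged sketch (not part of this hunk): i915_guc_log_control() is meant
 * to be driven from a debugfs "set" hook. The function name below is
 * illustrative; the real wiring lives in i915_debugfs.c.
 */
static int example_guc_log_control_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	int ret;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);
	ret = i915_guc_log_control(dev_priv, val);
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}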

View file

@ -170,6 +170,7 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
} while (0)
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
/* For display hotplug interrupt */
static inline void
@ -303,18 +304,18 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
new_val = dev_priv->pm_irq_mask;
new_val = dev_priv->pm_imr;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != dev_priv->pm_irq_mask) {
dev_priv->pm_irq_mask = new_val;
I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
if (new_val != dev_priv->pm_imr) {
dev_priv->pm_imr = new_val;
I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
POSTING_READ(gen6_pm_imr(dev_priv));
}
}
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
@ -322,28 +323,54 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
snb_update_pm_irq(dev_priv, mask, mask);
}
static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
uint32_t mask)
static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
snb_update_pm_irq(dev_priv, mask, 0);
}
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
__gen6_disable_pm_irq(dev_priv, mask);
__gen6_mask_pm_irq(dev_priv, mask);
}
void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
i915_reg_t reg = gen6_pm_iir(dev_priv);
assert_spin_locked(&dev_priv->irq_lock);
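	/* The IIR register can latch a second event behind the one currently
	 * visible, so it is written twice to make sure both are cleared
	 * (the same double-clear idiom used elsewhere in i915 IIR handling).
	 */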
I915_WRITE(reg, reset_mask);
I915_WRITE(reg, reset_mask);
POSTING_READ(reg);
}
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
assert_spin_locked(&dev_priv->irq_lock);
dev_priv->pm_ier |= enable_mask;
I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
gen6_unmask_pm_irq(dev_priv, enable_mask);
/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
assert_spin_locked(&dev_priv->irq_lock);
dev_priv->pm_ier &= ~disable_mask;
__gen6_mask_pm_irq(dev_priv, disable_mask);
I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
/* a barrier is missing here, but we don't really need one */
}
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
i915_reg_t reg = gen6_pm_iir(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
I915_WRITE(reg, dev_priv->pm_rps_events);
I915_WRITE(reg, dev_priv->pm_rps_events);
POSTING_READ(reg);
gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
}
@ -357,8 +384,6 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
WARN_ON_ONCE(dev_priv->rps.pm_iir);
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
dev_priv->rps.interrupts_enabled = true;
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
dev_priv->pm_rps_events);
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
@ -379,9 +404,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
~dev_priv->pm_rps_events);
gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
synchronize_irq(dev_priv->drm.irq);
@ -395,6 +418,38 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
gen6_reset_rps_interrupts(dev_priv);
}
void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
spin_lock_irq(&dev_priv->irq_lock);
gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
spin_lock_irq(&dev_priv->irq_lock);
if (!dev_priv->guc.interrupts_enabled) {
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
dev_priv->pm_guc_events);
dev_priv->guc.interrupts_enabled = true;
gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
}
spin_unlock_irq(&dev_priv->irq_lock);
}
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->guc.interrupts_enabled = false;
gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
spin_unlock_irq(&dev_priv->irq_lock);
synchronize_irq(dev_priv->drm.irq);
gen9_reset_guc_interrupts(dev_priv);
}
/**
* bdw_update_port_irq - update DE port interrupt
* @dev_priv: driver private
@ -670,8 +725,8 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t high_frame, low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
pipe);
const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
htotal = mode->crtc_htotal;
@ -776,8 +831,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
const struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
pipe);
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
bool in_vbl = true;
@ -912,21 +967,22 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
struct timeval *vblank_time,
unsigned flags)
{
struct drm_crtc *crtc;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
if (pipe >= INTEL_INFO(dev)->num_pipes) {
if (pipe >= INTEL_INFO(dev_priv)->num_pipes) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
/* Get drm_crtc to timestamp: */
crtc = intel_get_crtc_for_pipe(dev, pipe);
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc == NULL) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
if (!crtc->hwmode.crtc_clock) {
if (!crtc->base.hwmode.crtc_clock) {
DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
return -EBUSY;
}
@ -934,7 +990,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags,
&crtc->hwmode);
&crtc->base.hwmode);
}
static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
@ -1085,7 +1141,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
client_boost = dev_priv->rps.client_boost;
dev_priv->rps.client_boost = false;
spin_unlock_irq(&dev_priv->irq_lock);
@ -1324,11 +1380,13 @@ static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
if (master_ctl & GEN8_GT_PM_IRQ) {
if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
if (gt_iir[2] & dev_priv->pm_rps_events) {
if (gt_iir[2] & (dev_priv->pm_rps_events |
dev_priv->pm_guc_events)) {
I915_WRITE_FW(GEN8_GT_IIR(2),
gt_iir[2] & dev_priv->pm_rps_events);
gt_iir[2] & (dev_priv->pm_rps_events |
dev_priv->pm_guc_events));
ret = IRQ_HANDLED;
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
@ -1360,6 +1418,9 @@ static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
if (gt_iir[2] & dev_priv->pm_rps_events)
gen6_rps_irq_handler(dev_priv, gt_iir[2]);
if (gt_iir[2] & dev_priv->pm_guc_events)
gen9_guc_irq_handler(dev_priv, gt_iir[2]);
}
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
@ -1586,7 +1647,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
if (dev_priv->rps.interrupts_enabled) {
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
schedule_work(&dev_priv->rps.work);
@ -1606,6 +1667,41 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
}
}
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
/* Sample the log buffer flush related bits & clear them out right
 * away from the message identity register to minimize the
 * probability of losing a flush interrupt when there are
 * back-to-back flush interrupts.
 * There can be a new flush interrupt, for a different log buffer
 * type (e.g. for ISR), whilst the Host is handling one (for DPC).
 * Since the same bit is used in the message register for ISR & DPC, it
 * could happen that the GuC sets the bit for the 2nd interrupt but the
 * Host clears out the bit on handling the 1st interrupt.
 */
u32 msg, flush;
msg = I915_READ(SOFT_SCRATCH(15));
flush = msg & (GUC2HOST_MSG_CRASH_DUMP_POSTED |
GUC2HOST_MSG_FLUSH_LOG_BUFFER);
if (flush) {
/* Clear the message bits that are handled */
I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
/* Handle flush interrupt in bottom half */
queue_work(dev_priv->guc.log.flush_wq,
&dev_priv->guc.log.flush_work);
dev_priv->guc.log.flush_interrupt_count++;
} else {
/* Leaving the unhandled event bits set will not cause the
 * interrupt to be re-triggered.
 */
}
}
}
static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@ -2408,7 +2504,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
if (fault_errors)
DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
pipe_name(pipe),
fault_errors);
}
@ -2752,420 +2848,6 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static bool
ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
{
if (INTEL_GEN(engine->i915) >= 8) {
return (ipehr >> 23) == 0x1c;
} else {
ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER);
}
}
static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
u64 offset)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_engine_cs *signaller;
enum intel_engine_id id;
if (INTEL_GEN(dev_priv) >= 8) {
for_each_engine(signaller, dev_priv, id) {
if (engine == signaller)
continue;
if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
return signaller;
}
} else {
u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
for_each_engine(signaller, dev_priv, id) {
if(engine == signaller)
continue;
if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
return signaller;
}
}
DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
engine->name, ipehr, offset);
return ERR_PTR(-ENODEV);
}
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
{
struct drm_i915_private *dev_priv = engine->i915;
void __iomem *vaddr;
u32 cmd, ipehr, head;
u64 offset = 0;
int i, backwards;
/*
* This function does not support execlist mode - any attempt to
* proceed further into this function will result in a kernel panic
* when dereferencing ring->buffer, which is not set up in execlist
* mode.
*
* The correct way of doing it would be to derive the currently
* executing ring buffer from the current context, which is derived
* from the currently running request. Unfortunately, to get the
* current request we would have to grab the struct_mutex before doing
* anything else, which would be ill-advised since some other thread
* might have grabbed it already and managed to hang itself, causing
* the hang checker to deadlock.
*
* Therefore, this function does not support execlist mode in its
* current form. Just return NULL and move on.
*/
if (engine->buffer == NULL)
return NULL;
ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
if (!ipehr_is_semaphore_wait(engine, ipehr))
return NULL;
/*
* HEAD is likely pointing to the dword after the actual command,
* so scan backwards until we find the MBOX. But limit it to just 3
* or 4 dwords depending on the semaphore wait command size.
* Note that we don't care about ACTHD here since that might
* point at a batch, and semaphores are always emitted into the
* ringbuffer itself.
*/
head = I915_READ_HEAD(engine) & HEAD_ADDR;
backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
vaddr = (void __iomem *)engine->buffer->vaddr;
for (i = backwards; i; --i) {
/*
* Be paranoid and presume the hw has gone off into the wild -
* our ring is smaller than what the hardware (and hence
* HEAD_ADDR) allows. Also handles wrap-around.
*/
head &= engine->buffer->size - 1;
/* This here seems to blow up */
cmd = ioread32(vaddr + head);
if (cmd == ipehr)
break;
head -= 4;
}
if (!i)
return NULL;
*seqno = ioread32(vaddr + head + 4) + 1;
if (INTEL_GEN(dev_priv) >= 8) {
offset = ioread32(vaddr + head + 12);
offset <<= 32;
offset |= ioread32(vaddr + head + 8);
}
return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
}
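/*
 * Ring layout assumed by the backwards scan above (gen8+ MI_SEMAPHORE_WAIT,
 * illustrative):
 *
 *	head + 0:	command dword matched against IPEHR
 *	head + 4:	seqno the waiter is blocked on (hence the +1)
 *	head + 8:	semaphore address, low 32 bits
 *	head + 12:	semaphore address, high 32 bits
 */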
static int semaphore_passed(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_engine_cs *signaller;
u32 seqno;
engine->hangcheck.deadlock++;
signaller = semaphore_waits_for(engine, &seqno);
if (signaller == NULL)
return -1;
if (IS_ERR(signaller))
return 0;
/* Prevent pathological recursion due to driver bugs */
if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
return -1;
if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
return 1;
/* cursory check for an unkickable deadlock */
if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
semaphore_passed(signaller) < 0)
return -1;
return 0;
}
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id)
engine->hangcheck.deadlock = 0;
}
static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
{
u32 tmp = current_instdone | *old_instdone;
bool unchanged;
unchanged = tmp == *old_instdone;
*old_instdone |= tmp;
return unchanged;
}
static bool subunits_stuck(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_instdone instdone;
struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
bool stuck;
int slice;
int subslice;
if (engine->id != RCS)
return true;
intel_engine_get_instdone(engine, &instdone);
/* There might be unstable subunit states even when
* actual head is not moving. Filter out the unstable ones by
* accumulating the undone -> done transitions and only
* consider those as progress.
*/
stuck = instdone_unchanged(instdone.instdone,
&accu_instdone->instdone);
stuck &= instdone_unchanged(instdone.slice_common,
&accu_instdone->slice_common);
for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
&accu_instdone->sampler[slice][subslice]);
stuck &= instdone_unchanged(instdone.row[slice][subslice],
&accu_instdone->row[slice][subslice]);
}
return stuck;
}
static enum intel_engine_hangcheck_action
head_stuck(struct intel_engine_cs *engine, u64 acthd)
{
if (acthd != engine->hangcheck.acthd) {
/* Clear subunit states on head movement */
memset(&engine->hangcheck.instdone, 0,
sizeof(engine->hangcheck.instdone));
return HANGCHECK_ACTIVE;
}
if (!subunits_stuck(engine))
return HANGCHECK_ACTIVE;
return HANGCHECK_HUNG;
}
static enum intel_engine_hangcheck_action
engine_stuck(struct intel_engine_cs *engine, u64 acthd)
{
struct drm_i915_private *dev_priv = engine->i915;
enum intel_engine_hangcheck_action ha;
u32 tmp;
ha = head_stuck(engine, acthd);
if (ha != HANGCHECK_HUNG)
return ha;
if (IS_GEN2(dev_priv))
return HANGCHECK_HUNG;
/* Is the chip hanging on a WAIT_FOR_EVENT?
* If so we can simply poke the RB_WAIT bit
* and break the hang. This should work on
* all but the second generation chipsets.
*/
tmp = I915_READ_CTL(engine);
if (tmp & RING_WAIT) {
i915_handle_error(dev_priv, 0,
"Kicking stuck wait on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
return HANGCHECK_KICK;
}
if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
switch (semaphore_passed(engine)) {
default:
return HANGCHECK_HUNG;
case 1:
i915_handle_error(dev_priv, 0,
"Kicking stuck semaphore on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
return HANGCHECK_KICK;
case 0:
return HANGCHECK_WAIT;
}
}
return HANGCHECK_HUNG;
}
/*
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. We keep track of per-ring seqno progress, and
* if there is no progress the hangcheck score for that ring is increased.
* Further, acthd is inspected to see if the ring is stuck. In the stuck case
* we kick the ring. If we see no progress on three subsequent calls
* we assume chip is wedged and try to fix it by resetting the chip.
*/
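/*
 * Worked example of the scoring below (illustrative): an engine whose head
 * and subunits stay stuck gains HUNG (20) per sample and quickly crosses
 * HANGCHECK_SCORE_RING_HUNG, whereas an engine that is busy but still making
 * subunit progress gains only BUSY (1); once its seqno advances again the
 * score decays by ACTIVE_DECAY (15), so short stalls do not accumulate into
 * a reset.
 */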
static void i915_hangcheck_elapsed(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int hung = 0, stuck = 0;
int busy_count = 0;
#define BUSY 1
#define KICK 5
#define HUNG 20
#define ACTIVE_DECAY 15
if (!i915.enable_hangcheck)
return;
if (!READ_ONCE(dev_priv->gt.awake))
return;
/* As enabling the GPU requires fairly extensive mmio access,
* periodically arm the mmio checker to see if we are triggering
* any invalid access.
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
for_each_engine(engine, dev_priv, id) {
bool busy = intel_engine_has_waiter(engine);
u64 acthd;
u32 seqno;
u32 submit;
semaphore_clear_deadlocks(dev_priv);
/* We don't strictly need an irq-barrier here, as we are not
* serving an interrupt request, be paranoid in case the
* barrier has side-effects (such as preventing a broken
* cacheline snoop) and so be sure that we can see the seqno
* advance. If the seqno should stick, due to a stale
* cacheline, we would erroneously declare the GPU hung.
*/
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
acthd = intel_engine_get_active_head(engine);
seqno = intel_engine_get_seqno(engine);
submit = READ_ONCE(engine->last_submitted_seqno);
if (engine->hangcheck.seqno == seqno) {
if (i915_seqno_passed(seqno, submit)) {
engine->hangcheck.action = HANGCHECK_IDLE;
} else {
/* We always increment the hangcheck score
* if the engine is busy and still processing
* the same request, so that no single request
* can run indefinitely (such as a chain of
* batches). The only time we do not increment
* the hangcheck score on this ring is when this
* engine is in a legitimate wait for another
* engine. In that case the waiting engine is a
* victim and we want to be sure we catch the
* right culprit. Then every time we do kick
* the ring, add a small increment to the
* score so that we can catch a batch that is
* being repeatedly kicked and so responsible
* for stalling the machine.
*/
engine->hangcheck.action =
engine_stuck(engine, acthd);
switch (engine->hangcheck.action) {
case HANGCHECK_IDLE:
case HANGCHECK_WAIT:
break;
case HANGCHECK_ACTIVE:
engine->hangcheck.score += BUSY;
break;
case HANGCHECK_KICK:
engine->hangcheck.score += KICK;
break;
case HANGCHECK_HUNG:
engine->hangcheck.score += HUNG;
break;
}
}
if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
hung |= intel_engine_flag(engine);
if (engine->hangcheck.action != HANGCHECK_HUNG)
stuck |= intel_engine_flag(engine);
}
} else {
engine->hangcheck.action = HANGCHECK_ACTIVE;
/* Gradually reduce the count so that we catch DoS
* attempts across multiple batches.
*/
if (engine->hangcheck.score > 0)
engine->hangcheck.score -= ACTIVE_DECAY;
if (engine->hangcheck.score < 0)
engine->hangcheck.score = 0;
/* Clear head and subunit states on seqno movement */
acthd = 0;
memset(&engine->hangcheck.instdone, 0,
sizeof(engine->hangcheck.instdone));
}
engine->hangcheck.seqno = seqno;
engine->hangcheck.acthd = acthd;
busy_count += busy;
}
if (hung) {
char msg[80];
unsigned int tmp;
int len;
/* If some rings hung but others were still busy, only
* blame the hanging rings in the synopsis.
*/
if (stuck != hung)
hung &= ~stuck;
len = scnprintf(msg, sizeof(msg),
"%s on ", stuck == hung ? "No progress" : "Hang");
for_each_engine_masked(engine, dev_priv, hung, tmp)
len += scnprintf(msg + len, sizeof(msg) - len,
"%s, ", engine->name);
msg[len-2] = '\0';
return i915_handle_error(dev_priv, hung, msg);
}
/* Reset timer in case GPU hangs without another request being added */
if (busy_count)
i915_queue_hangcheck(dev_priv);
}
static void ibx_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@ -3545,11 +3227,13 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
*/
if (HAS_VEBOX(dev))
if (HAS_VEBOX(dev_priv)) {
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
}
dev_priv->pm_irq_mask = 0xffffffff;
GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
dev_priv->pm_imr = 0xffffffff;
GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
}
}
@ -3669,14 +3353,15 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_L3_DPF(dev_priv))
gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
dev_priv->pm_irq_mask = 0xffffffff;
dev_priv->pm_ier = 0x0;
dev_priv->pm_imr = ~dev_priv->pm_ier;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
* is enabled/disabled.
* is enabled/disabled. Same will be the case for GuC interrupts.
*/
GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
@ -4460,6 +4145,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
if (HAS_GUC_SCHED(dev))
dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
@ -4481,9 +4169,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (INTEL_INFO(dev_priv)->gen >= 8)
dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
i915_hangcheck_elapsed);
if (IS_GEN2(dev_priv)) {
/* Gen2 doesn't have a hardware frame counter */
dev->max_vblank_count = 0;

View file

@ -288,7 +288,8 @@ static const struct intel_device_info intel_haswell_info = {
#define BDW_FEATURES \
HSW_FEATURES, \
BDW_COLORS, \
.has_logical_ring_contexts = 1
.has_logical_ring_contexts = 1, \
.has_64bit_reloc = 1
static const struct intel_device_info intel_broadwell_info = {
BDW_FEATURES,
@ -308,6 +309,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.is_cherryview = 1,
.has_64bit_reloc = 1,
.has_psr = 1,
.has_runtime_pm = 1,
.has_resource_streamer = 1,
@ -347,6 +349,7 @@ static const struct intel_device_info intel_broxton_info = {
.has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.num_pipes = 3,
.has_64bit_reloc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,

View file

@ -830,96 +830,7 @@ enum skl_disp_power_wells {
#define CCK_FREQUENCY_STATUS_SHIFT 8
#define CCK_FREQUENCY_VALUES (0x1f << 0)
/**
* DOC: DPIO
*
* VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
* ports. DPIO is the name given to such a display PHY. These PHYs
* don't follow the standard programming model using direct MMIO
* registers, and instead their registers must be accessed trough IOSF
* sideband. VLV has one such PHY for driving ports B and C, and CHV
* adds another PHY for driving port D. Each PHY responds to specific
* IOSF-SB port.
*
* Each display PHY is made up of one or two channels. Each channel
* houses a common lane part which contains the PLL and other common
* logic. CH0 common lane also contains the IOSF-SB logic for the
* Common Register Interface (CRI) ie. the DPIO registers. CRI clock
* must be running when any DPIO registers are accessed.
*
* In addition to having their own registers, the PHYs are also
* controlled through some dedicated signals from the display
* controller. These include PLL reference clock enable, PLL enable,
* and CRI clock selection, for example.
*
* Each channel also has two splines (also called data lanes), and
* each spline is made up of one Physical Access Coding Sub-Layer
* (PCS) block and two TX lanes. So each channel has two PCS blocks
* and four TX lanes. The TX lanes are used as DP lanes or TMDS
* data/clock pairs depending on the output type.
*
* Additionally the PHY also contains an AUX lane with AUX blocks
* for each channel. This is used for DP AUX communication, but
* this fact isn't really relevant for the driver since AUX is
* controlled from the display controller side. No DPIO registers
* need to be accessed during AUX communication.
*
* Generally on VLV/CHV the common lane corresponds to the pipe and
* the spline (PCS/TX) corresponds to the port.
*
* For dual channel PHY (VLV/CHV):
*
* pipe A == CMN/PLL/REF CH0
*
* pipe B == CMN/PLL/REF CH1
*
* port B == PCS/TX CH0
*
* port C == PCS/TX CH1
*
* This is especially important when we cross the streams
* ie. drive port B with pipe B, or port C with pipe A.
*
* For single channel PHY (CHV):
*
* pipe C == CMN/PLL/REF CH0
*
* port D == PCS/TX CH0
*
* On BXT the entire PHY channel corresponds to the port. That means
* the PLL is also now associated with the port rather than the pipe,
* and so the clock needs to be routed to the appropriate transcoder.
* Port A PLL is directly connected to transcoder EDP and port B/C
* PLLs can be routed to any transcoder A/B/C.
*
* Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
* digital port D (CHV) or port A (BXT). ::
*
*
* Dual channel PHY (VLV/CHV/BXT)
* ---------------------------------
* | CH0 | CH1 |
* | CMN/PLL/REF | CMN/PLL/REF |
* |---------------|---------------| Display PHY
* | PCS01 | PCS23 | PCS01 | PCS23 |
* |-------|-------|-------|-------|
* |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
* ---------------------------------
* | DDI0 | DDI1 | DP/HDMI ports
* ---------------------------------
*
* Single channel PHY (CHV/BXT)
* -----------------
* | CH0 |
* | CMN/PLL/REF |
* |---------------| Display PHY
* | PCS01 | PCS23 |
* |-------|-------|
* |TX0|TX1|TX2|TX3|
* -----------------
* | DDI2 | DP/HDMI port
* -----------------
*/
/* DPIO registers */
#define DPIO_DEVFN 0
#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
@ -1275,7 +1186,19 @@ enum skl_disp_power_wells {
#define DPIO_UPAR_SHIFT 30
/* BXT PHY registers */
#define _BXT_PHY(phy, a, b) _MMIO_PIPE((phy), (a), (b))
#define _BXT_PHY0_BASE 0x6C000
#define _BXT_PHY1_BASE 0x162000
#define BXT_PHY_BASE(phy) _PIPE((phy), _BXT_PHY0_BASE, \
_BXT_PHY1_BASE)
#define _BXT_PHY(phy, reg) \
_MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
#define _BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
(BXT_PHY_BASE(phy) + _PIPE((ch), (reg_ch0) - _BXT_PHY0_BASE, \
(reg_ch1) - _BXT_PHY0_BASE))
#define _MMIO_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
_MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))
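/*
 * Worked example of the phy/channel addressing (illustrative; it assumes the
 * usual DPIO_PHY0/DPIO_PHY1 and DPIO_CH0/DPIO_CH1 indices of 0 and 1):
 * registers are named by their PHY0 addresses and rebased onto the selected
 * PHY, with the channel picked as an offset from _BXT_PHY0_BASE. For the
 * PLL EBB 4 register defined below:
 *
 *	BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0)
 *		= _MMIO(0x162000 + (0x6C038 - 0x6C000)) = _MMIO(0x162038)
 *	BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1)
 *		= _MMIO(0x6C000 + (0x6C344 - 0x6C000)) = _MMIO(0x6C344)
 *
 * i.e. the old per-port _A and _C addresses fall out of the (phy, ch) lookup.
 */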
#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
#define GT_DISPLAY_POWER_ON(phy) (1 << (phy))
@ -1292,8 +1215,8 @@ enum skl_disp_power_wells {
#define _PHY_CTL_FAMILY_EDP 0x64C80
#define _PHY_CTL_FAMILY_DDI 0x64C90
#define COMMON_RESET_DIS (1 << 31)
#define BXT_PHY_CTL_FAMILY(phy) _BXT_PHY((phy), _PHY_CTL_FAMILY_DDI, \
_PHY_CTL_FAMILY_EDP)
#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PIPE((phy), _PHY_CTL_FAMILY_DDI, \
_PHY_CTL_FAMILY_EDP)
/* BXT PHY PLL registers */
#define _PORT_PLL_A 0x46074
@ -1313,18 +1236,18 @@ enum skl_disp_power_wells {
#define PORT_PLL_P2_SHIFT 8
#define PORT_PLL_P2_MASK (0x1f << PORT_PLL_P2_SHIFT)
#define PORT_PLL_P2(x) ((x) << PORT_PLL_P2_SHIFT)
#define BXT_PORT_PLL_EBB_0(port) _MMIO_PORT3(port, _PORT_PLL_EBB_0_A, \
_PORT_PLL_EBB_0_B, \
_PORT_PLL_EBB_0_C)
#define BXT_PORT_PLL_EBB_0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
_PORT_PLL_EBB_0_B, \
_PORT_PLL_EBB_0_C)
#define _PORT_PLL_EBB_4_A 0x162038
#define _PORT_PLL_EBB_4_B 0x6C038
#define _PORT_PLL_EBB_4_C 0x6C344
#define PORT_PLL_10BIT_CLK_ENABLE (1 << 13)
#define PORT_PLL_RECALIBRATE (1 << 14)
#define BXT_PORT_PLL_EBB_4(port) _MMIO_PORT3(port, _PORT_PLL_EBB_4_A, \
_PORT_PLL_EBB_4_B, \
_PORT_PLL_EBB_4_C)
#define BXT_PORT_PLL_EBB_4(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
_PORT_PLL_EBB_4_B, \
_PORT_PLL_EBB_4_C)
#define _PORT_PLL_0_A 0x162100
#define _PORT_PLL_0_B 0x6C100
@ -1355,57 +1278,56 @@ enum skl_disp_power_wells {
#define PORT_PLL_DCO_AMP_DEFAULT 15
#define PORT_PLL_DCO_AMP_MASK 0x3c00
#define PORT_PLL_DCO_AMP(x) ((x)<<10)
#define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \
_PORT_PLL_0_B, \
_PORT_PLL_0_C)
#define BXT_PORT_PLL(port, idx) _MMIO(_PORT_PLL_BASE(port) + (idx) * 4)
#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \
_PORT_PLL_0_B, \
_PORT_PLL_0_C)
#define BXT_PORT_PLL(phy, ch, idx) _MMIO(_PORT_PLL_BASE(phy, ch) + \
(idx) * 4)
/* BXT PHY common lane registers */
#define _PORT_CL1CM_DW0_A 0x162000
#define _PORT_CL1CM_DW0_BC 0x6C000
#define PHY_POWER_GOOD (1 << 16)
#define PHY_RESERVED (1 << 7)
#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \
_PORT_CL1CM_DW0_A)
#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
#define _PORT_CL1CM_DW9_A 0x162024
#define _PORT_CL1CM_DW9_BC 0x6C024
#define IREF0RC_OFFSET_SHIFT 8
#define IREF0RC_OFFSET_MASK (0xFF << IREF0RC_OFFSET_SHIFT)
#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC, \
_PORT_CL1CM_DW9_A)
#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC)
#define _PORT_CL1CM_DW10_A 0x162028
#define _PORT_CL1CM_DW10_BC 0x6C028
#define IREF1RC_OFFSET_SHIFT 8
#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC, \
_PORT_CL1CM_DW10_A)
#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
#define _PORT_CL1CM_DW28_A 0x162070
#define _PORT_CL1CM_DW28_BC 0x6C070
#define OCL1_POWER_DOWN_EN (1 << 23)
#define DW28_OLDO_DYN_PWR_DOWN_EN (1 << 22)
#define SUS_CLK_CONFIG 0x3
#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC, \
_PORT_CL1CM_DW28_A)
#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC)
#define _PORT_CL1CM_DW30_A 0x162078
#define _PORT_CL1CM_DW30_BC 0x6C078
#define OCL2_LDOFUSE_PWR_DIS (1 << 6)
#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC, \
_PORT_CL1CM_DW30_A)
#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
/* Defined for PHY0 only */
#define BXT_PORT_CL2CM_DW6_BC _MMIO(0x6C358)
/* The spec defines this only for BXT PHY0, but let's assume that this
* would exist for PHY1 too if it had a second channel.
*/
#define _PORT_CL2CM_DW6_A 0x162358
#define _PORT_CL2CM_DW6_BC 0x6C358
#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
#define _PORT_REF_DW3_BC 0x6C18C
#define GRC_DONE (1 << 22)
#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC, \
_PORT_REF_DW3_A)
#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC)
#define _PORT_REF_DW6_A 0x162198
#define _PORT_REF_DW6_BC 0x6C198
@ -1416,15 +1338,13 @@ enum skl_disp_power_wells {
#define GRC_CODE_SLOW_SHIFT 8
#define GRC_CODE_SLOW_MASK (0xFF << GRC_CODE_SLOW_SHIFT)
#define GRC_CODE_NOM_MASK 0xFF
#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC, \
_PORT_REF_DW6_A)
#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC)
#define _PORT_REF_DW8_A 0x1621A0
#define _PORT_REF_DW8_BC 0x6C1A0
#define GRC_DIS (1 << 15)
#define GRC_RDY_OVRD (1 << 1)
#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC, \
_PORT_REF_DW8_A)
#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC)
/* BXT PHY PCS registers */
#define _PORT_PCS_DW10_LN01_A 0x162428
@ -1433,12 +1353,13 @@ enum skl_disp_power_wells {
#define _PORT_PCS_DW10_GRP_A 0x162C28
#define _PORT_PCS_DW10_GRP_B 0x6CC28
#define _PORT_PCS_DW10_GRP_C 0x6CE28
#define BXT_PORT_PCS_DW10_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW10_LN01_A, \
_PORT_PCS_DW10_LN01_B, \
_PORT_PCS_DW10_LN01_C)
#define BXT_PORT_PCS_DW10_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW10_GRP_A, \
_PORT_PCS_DW10_GRP_B, \
_PORT_PCS_DW10_GRP_C)
#define BXT_PORT_PCS_DW10_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
_PORT_PCS_DW10_LN01_B, \
_PORT_PCS_DW10_LN01_C)
#define BXT_PORT_PCS_DW10_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
_PORT_PCS_DW10_GRP_B, \
_PORT_PCS_DW10_GRP_C)
#define TX2_SWING_CALC_INIT (1 << 31)
#define TX1_SWING_CALC_INIT (1 << 30)
@ -1453,15 +1374,15 @@ enum skl_disp_power_wells {
#define _PORT_PCS_DW12_GRP_C 0x6CE30
#define LANESTAGGER_STRAP_OVRD (1 << 6)
#define LANE_STAGGER_MASK 0x1F
#define BXT_PORT_PCS_DW12_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN01_A, \
_PORT_PCS_DW12_LN01_B, \
_PORT_PCS_DW12_LN01_C)
#define BXT_PORT_PCS_DW12_LN23(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN23_A, \
_PORT_PCS_DW12_LN23_B, \
_PORT_PCS_DW12_LN23_C)
#define BXT_PORT_PCS_DW12_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW12_GRP_A, \
_PORT_PCS_DW12_GRP_B, \
_PORT_PCS_DW12_GRP_C)
#define BXT_PORT_PCS_DW12_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
_PORT_PCS_DW12_LN01_B, \
_PORT_PCS_DW12_LN01_C)
#define BXT_PORT_PCS_DW12_LN23(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
_PORT_PCS_DW12_LN23_B, \
_PORT_PCS_DW12_LN23_C)
#define BXT_PORT_PCS_DW12_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
_PORT_PCS_DW12_GRP_B, \
_PORT_PCS_DW12_GRP_C)
/* BXT PHY TX registers */
#define _BXT_LANE_OFFSET(lane) (((lane) >> 1) * 0x200 + \
@ -1473,12 +1394,12 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW2_GRP_A 0x162D08
#define _PORT_TX_DW2_GRP_B 0x6CD08
#define _PORT_TX_DW2_GRP_C 0x6CF08
#define BXT_PORT_TX_DW2_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW2_GRP_A, \
_PORT_TX_DW2_GRP_B, \
_PORT_TX_DW2_GRP_C)
#define BXT_PORT_TX_DW2_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW2_LN0_A, \
_PORT_TX_DW2_LN0_B, \
_PORT_TX_DW2_LN0_C)
#define BXT_PORT_TX_DW2_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
_PORT_TX_DW2_LN0_B, \
_PORT_TX_DW2_LN0_C)
#define BXT_PORT_TX_DW2_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
_PORT_TX_DW2_GRP_B, \
_PORT_TX_DW2_GRP_C)
#define MARGIN_000_SHIFT 16
#define MARGIN_000 (0xFF << MARGIN_000_SHIFT)
#define UNIQ_TRANS_SCALE_SHIFT 8
@ -1490,12 +1411,12 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW3_GRP_A 0x162D0C
#define _PORT_TX_DW3_GRP_B 0x6CD0C
#define _PORT_TX_DW3_GRP_C 0x6CF0C
#define BXT_PORT_TX_DW3_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW3_GRP_A, \
_PORT_TX_DW3_GRP_B, \
_PORT_TX_DW3_GRP_C)
#define BXT_PORT_TX_DW3_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW3_LN0_A, \
_PORT_TX_DW3_LN0_B, \
_PORT_TX_DW3_LN0_C)
#define BXT_PORT_TX_DW3_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
_PORT_TX_DW3_LN0_B, \
_PORT_TX_DW3_LN0_C)
#define BXT_PORT_TX_DW3_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
_PORT_TX_DW3_GRP_B, \
_PORT_TX_DW3_GRP_C)
#define SCALE_DCOMP_METHOD (1 << 26)
#define UNIQUE_TRANGE_EN_METHOD (1 << 27)
@ -1505,12 +1426,12 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW4_GRP_A 0x162D10
#define _PORT_TX_DW4_GRP_B 0x6CD10
#define _PORT_TX_DW4_GRP_C 0x6CF10
#define BXT_PORT_TX_DW4_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW4_LN0_A, \
_PORT_TX_DW4_LN0_B, \
_PORT_TX_DW4_LN0_C)
#define BXT_PORT_TX_DW4_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW4_GRP_A, \
_PORT_TX_DW4_GRP_B, \
_PORT_TX_DW4_GRP_C)
#define BXT_PORT_TX_DW4_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
_PORT_TX_DW4_LN0_B, \
_PORT_TX_DW4_LN0_C)
#define BXT_PORT_TX_DW4_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
_PORT_TX_DW4_GRP_B, \
_PORT_TX_DW4_GRP_C)
#define DEEMPH_SHIFT 24
#define DE_EMPHASIS (0xFF << DEEMPH_SHIFT)
@ -1519,10 +1440,10 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW14_LN0_C 0x6C938
#define LATENCY_OPTIM_SHIFT 30
#define LATENCY_OPTIM (1 << LATENCY_OPTIM_SHIFT)
#define BXT_PORT_TX_DW14_LN(port, lane) _MMIO(_PORT3((port), _PORT_TX_DW14_LN0_A, \
_PORT_TX_DW14_LN0_B, \
_PORT_TX_DW14_LN0_C) + \
_BXT_LANE_OFFSET(lane))
#define BXT_PORT_TX_DW14_LN(phy, ch, lane) \
_MMIO(_BXT_PHY_CH(phy, ch, _PORT_TX_DW14_LN0_B, \
_PORT_TX_DW14_LN0_C) + \
_BXT_LANE_OFFSET(lane))
/* UAIMI scratch pad register 1 */
#define UAIMI_SPR1 _MMIO(0x4F074)
@ -2188,8 +2109,9 @@ enum skl_disp_power_wells {
#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */
#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4)
#define FBC_STATUS2 _MMIO(0x43214)
#define FBC_COMPRESSION_MASK 0x7ff
#define FBC_STATUS2 _MMIO(0x43214)
#define IVB_FBC_COMPRESSION_MASK 0x7ff
#define BDW_FBC_COMPRESSION_MASK 0xfff
#define FBC_LL_SIZE (1536)
@ -6015,6 +5937,7 @@ enum {
#define GEN8_DE_PIPE_A_IRQ (1<<16)
#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+(pipe)))
#define GEN8_GT_VECS_IRQ (1<<6)
#define GEN8_GT_GUC_IRQ (1<<5)
#define GEN8_GT_PM_IRQ (1<<4)
#define GEN8_GT_VCS2_IRQ (1<<3)
#define GEN8_GT_VCS1_IRQ (1<<2)
@ -6026,6 +5949,16 @@ enum {
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
#define GEN9_GUC_TO_HOST_INT_EVENT (1<<31)
#define GEN9_GUC_EXEC_ERROR_EVENT (1<<30)
#define GEN9_GUC_DISPLAY_EVENT (1<<29)
#define GEN9_GUC_SEMA_SIGNAL_EVENT (1<<28)
#define GEN9_GUC_IOMMU_MSG_EVENT (1<<27)
#define GEN9_GUC_DB_RING_EVENT (1<<26)
#define GEN9_GUC_DMA_DONE_EVENT (1<<25)
#define GEN9_GUC_FATAL_ERROR_EVENT (1<<24)
#define GEN9_GUC_NOTIFICATION_EVENT (1<<23)
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
#define GEN8_VCS1_IRQ_SHIFT 0
@ -7358,6 +7291,13 @@ enum {
#define _HSW_AUD_MISC_CTRL_B 0x65110
#define HSW_AUD_MISC_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
#define _HSW_AUD_M_CTS_ENABLE_A 0x65028
#define _HSW_AUD_M_CTS_ENABLE_B 0x65128
#define HSW_AUD_M_CTS_ENABLE(pipe) _MMIO_PIPE(pipe, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
#define AUD_M_CTS_M_VALUE_INDEX (1 << 21)
#define AUD_M_CTS_M_PROG_ENABLE (1 << 20)
#define AUD_CONFIG_M_MASK 0xfffff
#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
#define HSW_AUD_DIP_ELD_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)

View file

@ -13,6 +13,8 @@
#include "i915_sw_fence.h"
#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
static DEFINE_SPINLOCK(i915_sw_fence_lock);
static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
@ -135,6 +137,8 @@ static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *
list_del(&wq->task_list);
__i915_sw_fence_complete(wq->private, key);
i915_sw_fence_put(wq->private);
if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
kfree(wq);
return 0;
}
@ -192,9 +196,9 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
return err;
}
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *signaler,
wait_queue_t *wq)
static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *signaler,
wait_queue_t *wq, gfp_t gfp)
{
unsigned long flags;
int pending;
@ -206,8 +210,22 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
return -EINVAL;
pending = 0;
if (!wq) {
wq = kmalloc(sizeof(*wq), gfp);
if (!wq) {
if (!gfpflags_allow_blocking(gfp))
return -ENOMEM;
i915_sw_fence_wait(signaler);
return 0;
}
pending |= I915_SW_FENCE_FLAG_ALLOC;
}
INIT_LIST_HEAD(&wq->task_list);
wq->flags = 0;
wq->flags = pending;
wq->func = i915_sw_fence_wake;
wq->private = i915_sw_fence_get(fence);
@ -226,6 +244,20 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
return pending;
}
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *signaler,
wait_queue_t *wq)
{
return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
}
int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
struct i915_sw_fence *signaler,
gfp_t gfp)
{
return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
}
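/*
 * A minimal usage sketch (hypothetical caller, not part of this file): with
 * the _gfp variant the caller no longer supplies a wait entry; one is
 * allocated with the given gfp, and if a blocking allocation fails the call
 * simply waits for the signaler before returning.
 */
static int __maybe_unused example_await_then_commit(struct i915_sw_fence *chain,
						    struct i915_sw_fence *signaler)
{
	int ret;

	/* make @chain wait upon @signaler completing */
	ret = i915_sw_fence_await_sw_fence_gfp(chain, signaler, GFP_KERNEL);
	if (ret < 0)
		return ret;

	/* release the initial count; @chain signals once its waits are done */
	i915_sw_fence_commit(chain);
	return 0;
}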
struct i915_sw_dma_fence_cb {
struct dma_fence_cb base;
struct i915_sw_fence *fence;

View file

@ -46,6 +46,9 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence);
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *after,
wait_queue_t *wq);
int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
struct i915_sw_fence *after,
gfp_t gfp);
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
struct dma_fence *dma,
unsigned long timeout,
@ -62,4 +65,9 @@ static inline bool i915_sw_fence_done(const struct i915_sw_fence *fence)
return atomic_read(&fence->pending) < 0;
}
static inline void i915_sw_fence_wait(struct i915_sw_fence *fence)
{
wait_event(fence->wait, i915_sw_fence_done(fence));
}
#endif /* _I915_SW_FENCE_H_ */

View file

@ -466,7 +466,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
__entry->dev = from->i915->drm.primary->index;
__entry->sync_from = from->engine->id;
__entry->sync_to = to->engine->id;
__entry->seqno = from->fence.seqno;
__entry->seqno = from->global_seqno;
),
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@ -489,7 +489,7 @@ TRACE_EVENT(i915_gem_ring_dispatch,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
__entry->seqno = req->fence.seqno;
__entry->seqno = req->global_seqno;
__entry->flags = flags;
dma_fence_enable_sw_signaling(&req->fence);
),
@ -534,7 +534,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
__entry->seqno = req->fence.seqno;
__entry->seqno = req->global_seqno;
),
TP_printk("dev=%u, ring=%u, seqno=%u",
@ -596,7 +596,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
__entry->seqno = req->fence.seqno;
__entry->seqno = req->global_seqno;
__entry->blocking =
mutex_is_locked(&req->i915->drm.struct_mutex);
),

View file

@ -84,7 +84,6 @@ intel_plane_duplicate_state(struct drm_plane *plane)
state = &intel_state->base;
__drm_atomic_helper_plane_duplicate_state(plane, state);
intel_state->wait_req = NULL;
return state;
}
@ -101,7 +100,6 @@ void
intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
WARN_ON(state && to_intel_plane_state(state)->wait_req);
drm_atomic_helper_plane_destroy_state(plane, state);
}

View file

@ -57,6 +57,63 @@
* struct &i915_audio_component_audio_ops @audio_ops is called from i915 driver.
*/
/* DP N/M table */
#define LC_540M 540000
#define LC_270M 270000
#define LC_162M 162000
struct dp_aud_n_m {
int sample_rate;
int clock;
u16 m;
u16 n;
};
/* Values according to DP 1.4 Table 2-104 */
static const struct dp_aud_n_m dp_aud_n_m[] = {
{ 32000, LC_162M, 1024, 10125 },
{ 44100, LC_162M, 784, 5625 },
{ 48000, LC_162M, 512, 3375 },
{ 64000, LC_162M, 2048, 10125 },
{ 88200, LC_162M, 1568, 5625 },
{ 96000, LC_162M, 1024, 3375 },
{ 128000, LC_162M, 4096, 10125 },
{ 176400, LC_162M, 3136, 5625 },
{ 192000, LC_162M, 2048, 3375 },
{ 32000, LC_270M, 1024, 16875 },
{ 44100, LC_270M, 784, 9375 },
{ 48000, LC_270M, 512, 5625 },
{ 64000, LC_270M, 2048, 16875 },
{ 88200, LC_270M, 1568, 9375 },
{ 96000, LC_270M, 1024, 5625 },
{ 128000, LC_270M, 4096, 16875 },
{ 176400, LC_270M, 3136, 9375 },
{ 192000, LC_270M, 2048, 5625 },
{ 32000, LC_540M, 1024, 33750 },
{ 44100, LC_540M, 784, 18750 },
{ 48000, LC_540M, 512, 11250 },
{ 64000, LC_540M, 2048, 33750 },
{ 88200, LC_540M, 1568, 18750 },
{ 96000, LC_540M, 1024, 11250 },
{ 128000, LC_540M, 4096, 33750 },
{ 176400, LC_540M, 3136, 18750 },
{ 192000, LC_540M, 2048, 11250 },
};
static const struct dp_aud_n_m *
audio_config_dp_get_n_m(struct intel_crtc *intel_crtc, int rate)
{
int i;
for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
if (rate == dp_aud_n_m[i].sample_rate &&
intel_crtc->config->port_clock == dp_aud_n_m[i].clock)
return &dp_aud_n_m[i];
}
return NULL;
}
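/*
 * Illustrative check (hypothetical helper, not used by the driver): the
 * table above encodes the DP audio relation Maud/Naud = 512 * fs / f_LS_Clk,
 * with the link symbol clock in Hz being clock * 1000. For example, 48 kHz
 * at the 1.62 GHz link rate gives 512 * 48000 / 162000000 = 512 / 3375.
 */
static bool __maybe_unused dp_aud_n_m_entry_valid(const struct dp_aud_n_m *nm)
{
	u64 lhs = (u64)nm->m * nm->clock * 1000;
	u64 rhs = (u64)nm->n * nm->sample_rate * 512;

	return lhs == rhs;
}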
static const struct {
int clock;
u32 config;
@ -225,16 +282,43 @@ hsw_dp_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
int rate = acomp ? acomp->aud_sample_rate[port] : 0;
const struct dp_aud_n_m *nm = audio_config_dp_get_n_m(intel_crtc, rate);
enum pipe pipe = intel_crtc->pipe;
u32 tmp;
if (nm)
DRM_DEBUG_KMS("using Maud %u, Naud %u\n", nm->m, nm->n);
else
DRM_DEBUG_KMS("using automatic Maud, Naud\n");
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp |= AUD_CONFIG_N_VALUE_INDEX;
if (nm) {
tmp &= ~AUD_CONFIG_N_MASK;
tmp |= AUD_CONFIG_N(nm->n);
tmp |= AUD_CONFIG_N_PROG_ENABLE;
}
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe));
tmp &= ~AUD_CONFIG_M_MASK;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
if (nm) {
tmp |= nm->m;
tmp |= AUD_M_CTS_M_VALUE_INDEX;
tmp |= AUD_M_CTS_M_PROG_ENABLE;
}
I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp);
}
static void
@ -254,19 +338,24 @@ hsw_hdmi_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
if (adjusted_mode->crtc_clock == TMDS_296M ||
adjusted_mode->crtc_clock == TMDS_297M) {
n = audio_config_hdmi_get_n(adjusted_mode, rate);
if (n != 0) {
tmp &= ~AUD_CONFIG_N_MASK;
tmp |= AUD_CONFIG_N(n);
tmp |= AUD_CONFIG_N_PROG_ENABLE;
} else {
DRM_DEBUG_KMS("no suitable N value is found\n");
}
n = audio_config_hdmi_get_n(adjusted_mode, rate);
if (n != 0) {
DRM_DEBUG_KMS("using N %d\n", n);
tmp &= ~AUD_CONFIG_N_MASK;
tmp |= AUD_CONFIG_N(n);
tmp |= AUD_CONFIG_N_PROG_ENABLE;
} else {
DRM_DEBUG_KMS("using automatic N\n");
}
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe));
tmp &= ~AUD_CONFIG_M_MASK;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
tmp |= AUD_M_CTS_M_PROG_ENABLE;
I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp);
}
static void
@ -687,7 +776,8 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
/* 1. get the pipe */
intel_encoder = get_saved_enc(dev_priv, port, pipe);
if (!intel_encoder || !intel_encoder->base.crtc ||
intel_encoder->type != INTEL_OUTPUT_HDMI) {
(intel_encoder->type != INTEL_OUTPUT_HDMI &&
intel_encoder->type != INTEL_OUTPUT_DP)) {
DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
err = -ENODEV;
goto unlock;

View file

@ -83,16 +83,18 @@ static void irq_enable(struct intel_engine_cs *engine)
*/
engine->breadcrumbs.irq_posted = true;
spin_lock_irq(&engine->i915->irq_lock);
/* Caller disables interrupts */
spin_lock(&engine->i915->irq_lock);
engine->irq_enable(engine);
spin_unlock_irq(&engine->i915->irq_lock);
spin_unlock(&engine->i915->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
{
spin_lock_irq(&engine->i915->irq_lock);
/* Caller disables interrupts */
spin_lock(&engine->i915->irq_lock);
engine->irq_disable(engine);
spin_unlock_irq(&engine->i915->irq_lock);
spin_unlock(&engine->i915->irq_lock);
engine->breadcrumbs.irq_posted = false;
}
@ -293,9 +295,9 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
struct intel_breadcrumbs *b = &engine->breadcrumbs;
bool first;
spin_lock(&b->lock);
spin_lock_irq(&b->lock);
first = __intel_engine_add_wait(engine, wait);
spin_unlock(&b->lock);
spin_unlock_irq(&b->lock);
return first;
}
@ -326,7 +328,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
if (RB_EMPTY_NODE(&wait->node))
return;
spin_lock(&b->lock);
spin_lock_irq(&b->lock);
if (RB_EMPTY_NODE(&wait->node))
goto out_unlock;
@ -400,7 +402,7 @@ out_unlock:
GEM_BUG_ON(rb_first(&b->waiters) !=
(b->first_wait ? &b->first_wait->node : NULL));
GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
spin_unlock(&b->lock);
spin_unlock_irq(&b->lock);
}
static bool signal_complete(struct drm_i915_gem_request *request)
@ -473,14 +475,14 @@ static int intel_breadcrumbs_signaler(void *arg)
* we just completed - so double check we are still
* the oldest before picking the next one.
*/
spin_lock(&b->lock);
spin_lock_irq(&b->lock);
if (request == b->first_signal) {
struct rb_node *rb =
rb_next(&request->signaling.node);
b->first_signal = rb ? to_signaler(rb) : NULL;
}
rb_erase(&request->signaling.node, &b->signals);
spin_unlock(&b->lock);
spin_unlock_irq(&b->lock);
i915_gem_request_put(request);
} else {
@ -502,11 +504,20 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
struct rb_node *parent, **p;
bool first, wakeup;
/* locked by dma_fence_enable_sw_signaling() */
/* Note that we may be called from an interrupt handler on another
* device (e.g. nouveau signaling a fence completion causing us
* to submit a request, and so enable signaling). As such,
* we need to make sure that all other users of b->lock protect
* against interrupts, i.e. use spin_lock_irqsave.
*/
/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
assert_spin_locked(&request->lock);
if (!request->global_seqno)
return;
request->signaling.wait.tsk = b->signaler;
request->signaling.wait.seqno = request->fence.seqno;
request->signaling.wait.seqno = request->global_seqno;
i915_gem_request_get(request);
spin_lock(&b->lock);
@ -530,8 +541,8 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
p = &b->signals.rb_node;
while (*p) {
parent = *p;
if (i915_seqno_passed(request->fence.seqno,
to_signaler(parent)->fence.seqno)) {
if (i915_seqno_passed(request->global_seqno,
to_signaler(parent)->global_seqno)) {
p = &parent->rb_right;
first = false;
} else {
@ -592,7 +603,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
struct intel_breadcrumbs *b = &engine->breadcrumbs;
cancel_fake_irq(engine);
spin_lock(&b->lock);
spin_lock_irq(&b->lock);
__intel_breadcrumbs_disable_irq(b);
if (intel_engine_has_waiter(engine)) {
@ -605,7 +616,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
irq_disable(engine);
}
spin_unlock(&b->lock);
spin_unlock_irq(&b->lock);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)

View file

@ -573,7 +573,7 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
POSTING_READ(pipeconf_reg);
/* Wait for next Vblank to substitute
* border color for Color info */
intel_wait_for_vblank(dev, pipe);
intel_wait_for_vblank(dev_priv, pipe);
st00 = I915_READ8(_VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :

View file

@ -168,12 +168,6 @@ struct stepping_info {
char substepping;
};
static const struct stepping_info kbl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
{'G', '0'}, {'H', '0'}, {'I', '0'},
};
static const struct stepping_info skl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
@ -194,10 +188,7 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
const struct stepping_info *si;
unsigned int size;
if (IS_KABYLAKE(dev_priv)) {
size = ARRAY_SIZE(kbl_stepping_info);
si = kbl_stepping_info;
} else if (IS_SKYLAKE(dev_priv)) {
if (IS_SKYLAKE(dev_priv)) {
size = ARRAY_SIZE(skl_stepping_info);
si = skl_stepping_info;
} else if (IS_BROXTON(dev_priv)) {

View file

@ -1547,7 +1547,6 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
{
const struct bxt_ddi_buf_trans *ddi_translations;
u32 n_entries, i;
uint32_t val;
if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
@ -1576,38 +1575,11 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
}
}
/*
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
*/
val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
val = I915_READ(BXT_PORT_TX_DW2_LN0(port));
val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
val |= ddi_translations[level].margin << MARGIN_000_SHIFT |
ddi_translations[level].scale << UNIQ_TRANS_SCALE_SHIFT;
I915_WRITE(BXT_PORT_TX_DW2_GRP(port), val);
val = I915_READ(BXT_PORT_TX_DW3_LN0(port));
val &= ~SCALE_DCOMP_METHOD;
if (ddi_translations[level].enable)
val |= SCALE_DCOMP_METHOD;
if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");
I915_WRITE(BXT_PORT_TX_DW3_GRP(port), val);
val = I915_READ(BXT_PORT_TX_DW4_LN0(port));
val &= ~DE_EMPHASIS;
val |= ddi_translations[level].deemphasis << DEEMPH_SHIFT;
I915_WRITE(BXT_PORT_TX_DW4_GRP(port), val);
val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
bxt_ddi_phy_set_signal_level(dev_priv, port,
ddi_translations[level].margin,
ddi_translations[level].scale,
ddi_translations[level].enable,
ddi_translations[level].deemphasis);
}
static uint32_t translate_signal_level(int signal_levels)
@ -1923,332 +1895,14 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder,
}
}
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
enum port port;
if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
return false;
if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
(PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
phy);
return false;
}
if (phy == DPIO_PHY1 &&
!(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE)) {
DRM_DEBUG_DRIVER("DDI PHY 1 powered, but GRC isn't done\n");
return false;
}
if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
phy);
return false;
}
for_each_port_masked(port,
phy == DPIO_PHY0 ? BIT(PORT_B) | BIT(PORT_C) :
BIT(PORT_A)) {
u32 tmp = I915_READ(BXT_PHY_CTL(port));
if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
"for port %c powered down "
"(PHY_CTL %08x)\n",
phy, port_name(port), tmp);
return false;
}
}
return true;
}
static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
}
static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
if (intel_wait_for_register(dev_priv,
BXT_PORT_REF_DW3(phy),
GRC_DONE, GRC_DONE,
10))
DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
}
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
u32 val;
if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
/* Still read out the GRC value for state verification */
if (phy == DPIO_PHY0)
dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
"won't reprogram it\n", phy);
return;
}
DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
"force reprogramming it\n", phy);
}
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
val |= GT_DISPLAY_POWER_ON(phy);
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
/*
* The PHY registers start out inaccessible and respond to reads with
* all 1s. Eventually they become accessible as they power up, then
* the reserved bit will give the default 0. Poll on the reserved bit
* becoming 0 to find when the PHY is accessible.
* HW team confirmed that the time to reach phypowergood status is
* anywhere between 50 us and 100us.
*/
if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
(PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
DRM_ERROR("timeout during PHY%d power on\n", phy);
}
/* Program PLL Rcomp code offset */
val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
val &= ~IREF0RC_OFFSET_MASK;
val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
val &= ~IREF1RC_OFFSET_MASK;
val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
/* Program power gating */
val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
SUS_CLK_CONFIG;
I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
if (phy == DPIO_PHY0) {
val = I915_READ(BXT_PORT_CL2CM_DW6_BC);
val |= DW6_OLDO_DYN_PWR_DOWN_EN;
I915_WRITE(BXT_PORT_CL2CM_DW6_BC, val);
}
val = I915_READ(BXT_PORT_CL1CM_DW30(phy));
val &= ~OCL2_LDOFUSE_PWR_DIS;
/*
* On PHY1 disable power on the second channel, since no port is
* connected there. On PHY0 both channels have a port, so leave it
* enabled.
* TODO: port C is only connected on BXT-P, so on BXT0/1 we should
* power down the second channel on PHY0 as well.
*
* FIXME: Clarify programming of the following, the register is
* read-only with bit 6 fixed at 0 at least in stepping A.
*/
if (phy == DPIO_PHY1)
val |= OCL2_LDOFUSE_PWR_DIS;
I915_WRITE(BXT_PORT_CL1CM_DW30(phy), val);
if (phy == DPIO_PHY0) {
uint32_t grc_code;
/*
* PHY0 isn't connected to an RCOMP resistor so copy over
* the corresponding calibrated value from PHY1, and disable
* the automatic calibration on PHY0.
*/
val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, DPIO_PHY1);
grc_code = val << GRC_CODE_FAST_SHIFT |
val << GRC_CODE_SLOW_SHIFT |
val;
I915_WRITE(BXT_PORT_REF_DW6(DPIO_PHY0), grc_code);
val = I915_READ(BXT_PORT_REF_DW8(DPIO_PHY0));
val |= GRC_DIS | GRC_RDY_OVRD;
I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
}
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val |= COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
if (phy == DPIO_PHY1)
bxt_phy_wait_grc_done(dev_priv, DPIO_PHY1);
}
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
uint32_t val;
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val &= ~COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
val &= ~GT_DISPLAY_POWER_ON(phy);
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
}
static bool __printf(6, 7)
__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
i915_reg_t reg, u32 mask, u32 expected,
const char *reg_fmt, ...)
{
struct va_format vaf;
va_list args;
u32 val;
val = I915_READ(reg);
if ((val & mask) == expected)
return true;
va_start(args, reg_fmt);
vaf.fmt = reg_fmt;
vaf.va = &args;
DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
"current %08x, expected %08x (mask %08x)\n",
phy, &vaf, reg.reg, val, (val & ~mask) | expected,
mask);
va_end(args);
return false;
}
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
uint32_t mask;
bool ok;
#define _CHK(reg, mask, exp, fmt, ...) \
__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
## __VA_ARGS__)
if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
return false;
ok = true;
/* PLL Rcomp code offset */
ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
"BXT_PORT_CL1CM_DW9(%d)", phy);
ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
"BXT_PORT_CL1CM_DW10(%d)", phy);
/* Power gating */
mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
"BXT_PORT_CL1CM_DW28(%d)", phy);
if (phy == DPIO_PHY0)
ok &= _CHK(BXT_PORT_CL2CM_DW6_BC,
DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
"BXT_PORT_CL2CM_DW6_BC");
/*
* TODO: Verify BXT_PORT_CL1CM_DW30 bit OCL2_LDOFUSE_PWR_DIS,
* at least on stepping A this bit is read-only and fixed at 0.
*/
if (phy == DPIO_PHY0) {
u32 grc_code = dev_priv->bxt_phy_grc;
grc_code = grc_code << GRC_CODE_FAST_SHIFT |
grc_code << GRC_CODE_SLOW_SHIFT |
grc_code;
mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
GRC_CODE_NOM_MASK;
ok &= _CHK(BXT_PORT_REF_DW6(DPIO_PHY0), mask, grc_code,
"BXT_PORT_REF_DW6(%d)", DPIO_PHY0);
mask = GRC_DIS | GRC_RDY_OVRD;
ok &= _CHK(BXT_PORT_REF_DW8(DPIO_PHY0), mask, mask,
"BXT_PORT_REF_DW8(%d)", DPIO_PHY0);
}
return ok;
#undef _CHK
}
static uint8_t
bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
switch (pipe_config->lane_count) {
case 1:
return 0;
case 2:
return BIT(2) | BIT(0);
case 4:
return BIT(3) | BIT(2) | BIT(0);
default:
MISSING_CASE(pipe_config->lane_count);
return 0;
}
}
static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
enum port port = dport->port;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
int lane;
uint8_t mask = intel_crtc->config->lane_lat_optim_mask;
for (lane = 0; lane < 4; lane++) {
u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
/*
* Note that on CHV this flag is called UPAR, but has
* the same function.
*/
val &= ~LATENCY_OPTIM;
if (intel_crtc->config->lane_lat_optim_mask & BIT(lane))
val |= LATENCY_OPTIM;
I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
}
}
static uint8_t
bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
enum port port = dport->port;
int lane;
uint8_t mask;
mask = 0;
for (lane = 0; lane < 4; lane++) {
u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
if (val & LATENCY_OPTIM)
mask |= BIT(lane);
}
return mask;
bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
}
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@ -2417,7 +2071,7 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
if (IS_BROXTON(dev_priv) && ret)
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(encoder,
pipe_config);
pipe_config->lane_count);
return ret;

View file

@ -282,12 +282,13 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->num_sprites[PIPE_A] = 2;
info->num_sprites[PIPE_B] = 2;
info->num_sprites[PIPE_C] = 1;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
else
} else if (INTEL_GEN(dev_priv) >= 5) {
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 1;
}
if (i915.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");

File diff suppressed because it is too large

View file

@ -213,6 +213,81 @@ intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
return max_dotclk;
}
static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
if (intel_dp->num_sink_rates) {
*sink_rates = intel_dp->sink_rates;
return intel_dp->num_sink_rates;
}
*sink_rates = default_rates;
return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
static int
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
int size;
if (IS_BROXTON(dev_priv)) {
*source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
*source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
} else {
*source_rates = default_rates;
size = ARRAY_SIZE(default_rates);
}
/* This depends on the fact that 5.4 is last value in the array */
if (!intel_dp_source_supports_hbr2(intel_dp))
size--;
return size;
}
static int intersect_rates(const int *source_rates, int source_len,
const int *sink_rates, int sink_len,
int *common_rates)
{
int i = 0, j = 0, k = 0;
while (i < source_len && j < sink_len) {
if (source_rates[i] == sink_rates[j]) {
if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
return k;
common_rates[k] = source_rates[i];
++k;
++i;
++j;
} else if (source_rates[i] < sink_rates[j]) {
++i;
} else {
++j;
}
}
return k;
}
static int intel_dp_common_rates(struct intel_dp *intel_dp,
int *common_rates)
{
const int *source_rates, *sink_rates;
int source_len, sink_len;
sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
source_len = intel_dp_source_rates(intel_dp, &source_rates);
return intersect_rates(source_rates, source_len,
sink_rates, sink_len,
common_rates);
}
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@ -320,8 +395,7 @@ static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum pipe pipe = intel_dp->pps_pipe;
bool pll_enabled, release_cl_override = false;
enum dpio_phy phy = DPIO_PHY(pipe);
@ -359,7 +433,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
release_cl_override = IS_CHERRYVIEW(dev_priv) &&
!chv_phy_powergate_ch(dev_priv, phy, ch, true);
if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev_priv) ?
if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
DRM_ERROR("Failed to force on pll for pipe %c!\n",
pipe_name(pipe));
@ -383,7 +457,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
POSTING_READ(intel_dp->output_reg);
if (!pll_enabled) {
vlv_force_pll_off(dev, pipe);
vlv_force_pll_off(dev_priv, pipe);
if (release_cl_override)
chv_phy_powergate_ch(dev_priv, phy, ch, false);
@ -1291,19 +1365,6 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
intel_dp->aux.transfer = intel_dp_aux_transfer;
}
static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
if (intel_dp->num_sink_rates) {
*sink_rates = intel_dp->sink_rates;
return intel_dp->num_sink_rates;
}
*sink_rates = default_rates;
return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@ -1316,31 +1377,6 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
return false;
}
static int
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
int size;
if (IS_BROXTON(dev_priv)) {
*source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
*source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
} else {
*source_rates = default_rates;
size = ARRAY_SIZE(default_rates);
}
/* This depends on the fact that 5.4 is last value in the array */
if (!intel_dp_source_supports_hbr2(intel_dp))
size--;
return size;
}
static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
@ -1375,43 +1411,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
}
}
static int intersect_rates(const int *source_rates, int source_len,
const int *sink_rates, int sink_len,
int *common_rates)
{
int i = 0, j = 0, k = 0;
while (i < source_len && j < sink_len) {
if (source_rates[i] == sink_rates[j]) {
if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
return k;
common_rates[k] = source_rates[i];
++k;
++i;
++j;
} else if (source_rates[i] < sink_rates[j]) {
++i;
} else {
++j;
}
}
return k;
}
static int intel_dp_common_rates(struct intel_dp *intel_dp,
int *common_rates)
{
const int *source_rates, *sink_rates;
int source_len, sink_len;
sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
source_len = intel_dp_source_rates(intel_dp, &source_rates);
return intersect_rates(source_rates, source_len,
sink_rates, sink_len,
common_rates);
}
static void snprintf_int_array(char *str, size_t len,
const int *array, int nelem)
{
@ -1451,40 +1450,35 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("common rates: %s\n", str);
}
static void intel_dp_print_hw_revision(struct intel_dp *intel_dp)
bool
__intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
{
uint8_t rev;
int len;
u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
DP_SINK_OUI;
if ((drm_debug & DRM_UT_KMS) == 0)
return;
if (!drm_dp_is_branch(intel_dp->dpcd))
return;
len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1);
if (len < 0)
return;
DRM_DEBUG_KMS("sink hw revision: %d.%d\n", (rev & 0xf0) >> 4, rev & 0xf);
return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
sizeof(*desc);
}
static void intel_dp_print_sw_revision(struct intel_dp *intel_dp)
bool intel_dp_read_desc(struct intel_dp *intel_dp)
{
uint8_t rev[2];
int len;
struct intel_dp_desc *desc = &intel_dp->desc;
bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
DP_OUI_SUPPORT;
int dev_id_len;
if ((drm_debug & DRM_UT_KMS) == 0)
return;
if (!__intel_dp_read_desc(intel_dp, desc))
return false;
if (!drm_dp_is_branch(intel_dp->dpcd))
return;
dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
(int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
dev_id_len, desc->device_id,
desc->hw_rev >> 4, desc->hw_rev & 0xf,
desc->sw_major_rev, desc->sw_minor_rev);
len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2);
if (len < 0)
return;
DRM_DEBUG_KMS("sink sw revision: %d.%d\n", rev[0], rev[1]);
return true;
}
static int rate_to_index(int find, const int *rates)
@ -2369,7 +2363,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
* 2. Program DP PLL enable
*/
if (IS_GEN5(dev_priv))
intel_wait_for_vblank_if_active(&dev_priv->drm, !crtc->pipe);
intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
intel_dp->DP |= DP_PLL_ENABLE;
@ -3492,7 +3486,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@ -3502,7 +3496,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
intel_dp->DP = DP;
}
static bool
bool
intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
@ -3526,6 +3520,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_read_dpcd(intel_dp))
return false;
intel_dp_read_desc(intel_dp);
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
@ -3627,23 +3623,6 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
return true;
}
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
u8 buf[3];
if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
if (drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
}
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
@ -3687,7 +3666,7 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int ret = 0;
@ -3708,7 +3687,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
}
do {
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0) {
@ -3731,7 +3710,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int ret;
@ -3759,14 +3738,14 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
return -EIO;
}
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
return 0;
}
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int count, ret;
@ -3777,7 +3756,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
return ret;
do {
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0) {
@ -4010,7 +3989,7 @@ intel_dp_retrain_link(struct intel_dp *intel_dp)
intel_dp_stop_link_train(intel_dp);
/* Keep underrun reporting disabled until things are stable */
intel_wait_for_vblank(&dev_priv->drm, crtc->pipe);
intel_wait_for_vblank(dev_priv, crtc->pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
if (crtc->config->has_pch_encoder)
@ -4422,10 +4401,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_dp_print_rates(intel_dp);
intel_dp_probe_oui(intel_dp);
intel_dp_print_hw_revision(intel_dp);
intel_dp_print_sw_revision(intel_dp);
intel_dp_read_desc(intel_dp);
intel_dp_configure_mst(intel_dp);
@ -4489,21 +4465,11 @@ static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
enum drm_connector_status status = connector->status;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
if (intel_dp->is_mst) {
/* MST devices are disconnected from a monitor POV */
intel_dp_unset_edid(intel_dp);
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DP;
return connector_status_disconnected;
}
/* If full detect is not performed yet, do a full detect */
if (!intel_dp->detect_done)
status = intel_dp_long_pulse(intel_dp->attached_connector);

View file

@ -23,6 +23,565 @@
#include "intel_drv.h"
/**
* DOC: DPIO
*
* VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
* ports. DPIO is the name given to such a display PHY. These PHYs
* don't follow the standard programming model using direct MMIO
* registers, and instead their registers must be accessed through IOSF
* sideband. VLV has one such PHY for driving ports B and C, and CHV
* adds another PHY for driving port D. Each PHY responds to specific
* IOSF-SB port.
*
* Each display PHY is made up of one or two channels. Each channel
* houses a common lane part which contains the PLL and other common
* logic. CH0 common lane also contains the IOSF-SB logic for the
* Common Register Interface (CRI) i.e. the DPIO registers. CRI clock
* must be running when any DPIO registers are accessed.
*
* In addition to having their own registers, the PHYs are also
* controlled through some dedicated signals from the display
* controller. These include PLL reference clock enable, PLL enable,
* and CRI clock selection, for example.
*
* Each channel also has two splines (also called data lanes), and
* each spline is made up of one Physical Access Coding Sub-Layer
* (PCS) block and two TX lanes. So each channel has two PCS blocks
* and four TX lanes. The TX lanes are used as DP lanes or TMDS
* data/clock pairs depending on the output type.
*
* Additionally the PHY also contains an AUX lane with AUX blocks
* for each channel. This is used for DP AUX communication, but
* this fact isn't really relevant for the driver since AUX is
* controlled from the display controller side. No DPIO registers
* need to be accessed during AUX communication.
*
* Generally on VLV/CHV the common lane corresponds to the pipe and
* the spline (PCS/TX) corresponds to the port.
*
* For dual channel PHY (VLV/CHV):
*
* pipe A == CMN/PLL/REF CH0
*
* pipe B == CMN/PLL/REF CH1
*
* port B == PCS/TX CH0
*
* port C == PCS/TX CH1
*
* This is especially important when we cross the streams
* i.e. drive port B with pipe B, or port C with pipe A.
*
* For single channel PHY (CHV):
*
* pipe C == CMN/PLL/REF CH0
*
* port D == PCS/TX CH0
*
* On BXT the entire PHY channel corresponds to the port. That means
* the PLL is also now associated with the port rather than the pipe,
* and so the clock needs to be routed to the appropriate transcoder.
* Port A PLL is directly connected to transcoder EDP and port B/C
* PLLs can be routed to any transcoder A/B/C.
*
* Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
* digital port D (CHV) or port A (BXT). ::
*
*
* Dual channel PHY (VLV/CHV/BXT)
* ---------------------------------
* | CH0 | CH1 |
* | CMN/PLL/REF | CMN/PLL/REF |
* |---------------|---------------| Display PHY
* | PCS01 | PCS23 | PCS01 | PCS23 |
* |-------|-------|-------|-------|
* |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
* ---------------------------------
* | DDI0 | DDI1 | DP/HDMI ports
* ---------------------------------
*
* Single channel PHY (CHV/BXT)
* -----------------
* | CH0 |
* | CMN/PLL/REF |
* |---------------| Display PHY
* | PCS01 | PCS23 |
* |-------|-------|
* |TX0|TX1|TX2|TX3|
* -----------------
* | DDI2 | DP/HDMI port
* -----------------
*/
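/*
 * Illustrative sketch (editor's addition, not part of this patch): the
 * VLV/CHV "common lane corresponds to the pipe" rule above, written out
 * as a helper. vlv_pipe_to_cmn_channel() is a made-up name for the
 * example; an equivalent helper, vlv_pipe_to_channel(), already lives in
 * intel_drv.h.
 */
static enum dpio_channel vlv_pipe_to_cmn_channel(enum pipe pipe)
{
	/* pipe A -> CMN/PLL/REF CH0, pipe B -> CH1; pipe C uses CH0 of
	 * the CHV single channel PHY. */
	return pipe == PIPE_B ? DPIO_CH1 : DPIO_CH0;
}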
/**
* struct bxt_ddi_phy_info - Hold info for a broxton DDI phy
*/
struct bxt_ddi_phy_info {
/**
* @dual_channel: true if this phy has a second channel.
*/
bool dual_channel;
/**
* @rcomp_phy: If -1, indicates this phy has its own rcomp resistor.
* Otherwise the GRC value will be copied from the phy indicated by
* this field.
*/
enum dpio_phy rcomp_phy;
/**
* @channel: struct containing per channel information.
*/
struct {
/**
* @port: which port maps to this channel.
*/
enum port port;
} channel[2];
};
static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
[DPIO_PHY0] = {
.dual_channel = true,
.rcomp_phy = DPIO_PHY1,
.channel = {
[DPIO_CH0] = { .port = PORT_B },
[DPIO_CH1] = { .port = PORT_C },
}
},
[DPIO_PHY1] = {
.dual_channel = false,
.rcomp_phy = -1,
.channel = {
[DPIO_CH0] = { .port = PORT_A },
}
},
};
static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
{
return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
BIT(phy_info->channel[DPIO_CH0].port);
}
void bxt_port_to_phy_channel(enum port port,
enum dpio_phy *phy, enum dpio_channel *ch)
{
const struct bxt_ddi_phy_info *phy_info;
int i;
for (i = 0; i < ARRAY_SIZE(bxt_ddi_phy_info); i++) {
phy_info = &bxt_ddi_phy_info[i];
if (port == phy_info->channel[DPIO_CH0].port) {
*phy = i;
*ch = DPIO_CH0;
return;
}
if (phy_info->dual_channel &&
port == phy_info->channel[DPIO_CH1].port) {
*phy = i;
*ch = DPIO_CH1;
return;
}
}
WARN(1, "PHY not found for PORT %c", port_name(port));
*phy = DPIO_PHY0;
*ch = DPIO_CH0;
}
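/*
 * Illustrative sketch (editor's addition, not part of this patch): a
 * hypothetical caller resolving the PHY/channel pair for a port with
 * bxt_port_to_phy_channel() before touching per-channel registers.
 * bxt_phy_debug_mapping() and its debug message are made up for the
 * example; port_name() and DRM_DEBUG_KMS() are existing helpers.
 */
static void bxt_phy_debug_mapping(enum port port)
{
	enum dpio_phy phy;
	enum dpio_channel ch;

	/* Port A -> PHY1/CH0, port B -> PHY0/CH0, port C -> PHY0/CH1 */
	bxt_port_to_phy_channel(port, &phy, &ch);

	DRM_DEBUG_KMS("port %c -> PHY%d channel %d\n",
		      port_name(port), phy, ch);
}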
void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
enum port port, u32 margin, u32 scale,
u32 enable, u32 deemphasis)
{
u32 val;
enum dpio_phy phy;
enum dpio_channel ch;
bxt_port_to_phy_channel(port, &phy, &ch);
/*
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
*/
val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
val = I915_READ(BXT_PORT_TX_DW2_LN0(phy, ch));
val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
I915_WRITE(BXT_PORT_TX_DW2_GRP(phy, ch), val);
val = I915_READ(BXT_PORT_TX_DW3_LN0(phy, ch));
val &= ~SCALE_DCOMP_METHOD;
if (enable)
val |= SCALE_DCOMP_METHOD;
if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");
I915_WRITE(BXT_PORT_TX_DW3_GRP(phy, ch), val);
val = I915_READ(BXT_PORT_TX_DW4_LN0(phy, ch));
val &= ~DE_EMPHASIS;
val |= deemphasis << DEEMPH_SHIFT;
I915_WRITE(BXT_PORT_TX_DW4_GRP(phy, ch), val);
val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
}
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
enum port port;
if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
return false;
if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
(PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
phy);
return false;
}
if (phy_info->rcomp_phy == -1 &&
!(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE)) {
DRM_DEBUG_DRIVER("DDI PHY %d powered, but GRC isn't done\n",
phy);
return false;
}
if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
phy);
return false;
}
for_each_port_masked(port, bxt_phy_port_mask(phy_info)) {
u32 tmp = I915_READ(BXT_PHY_CTL(port));
if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
"for port %c powered down "
"(PHY_CTL %08x)\n",
phy, port_name(port), tmp);
return false;
}
}
return true;
}
static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
}
static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
if (intel_wait_for_register(dev_priv,
BXT_PORT_REF_DW3(phy),
GRC_DONE, GRC_DONE,
10))
DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
}
static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
u32 val;
if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
/* Still read out the GRC value for state verification */
if (phy_info->rcomp_phy != -1)
dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
"won't reprogram it\n", phy);
return;
}
DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
"force reprogramming it\n", phy);
}
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
val |= GT_DISPLAY_POWER_ON(phy);
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
/*
* The PHY registers start out inaccessible and respond to reads with
* all 1s. Eventually they become accessible as they power up, then
* the reserved bit will give the default 0. Poll on the reserved bit
* becoming 0 to find when the PHY is accessible.
* HW team confirmed that the time to reach phypowergood status is
* anywhere between 50 us and 100 us.
*/
if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
(PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
DRM_ERROR("timeout during PHY%d power on\n", phy);
}
/* Program PLL Rcomp code offset */
val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
val &= ~IREF0RC_OFFSET_MASK;
val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
val &= ~IREF1RC_OFFSET_MASK;
val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
/* Program power gating */
val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
SUS_CLK_CONFIG;
I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
if (phy_info->dual_channel) {
val = I915_READ(BXT_PORT_CL2CM_DW6(phy));
val |= DW6_OLDO_DYN_PWR_DOWN_EN;
I915_WRITE(BXT_PORT_CL2CM_DW6(phy), val);
}
if (phy_info->rcomp_phy != -1) {
uint32_t grc_code;
/*
* PHY0 isn't connected to an RCOMP resistor so copy over
* the corresponding calibrated value from PHY1, and disable
* the automatic calibration on PHY0.
*/
val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
phy_info->rcomp_phy);
grc_code = val << GRC_CODE_FAST_SHIFT |
val << GRC_CODE_SLOW_SHIFT |
val;
I915_WRITE(BXT_PORT_REF_DW6(phy), grc_code);
val = I915_READ(BXT_PORT_REF_DW8(phy));
val |= GRC_DIS | GRC_RDY_OVRD;
I915_WRITE(BXT_PORT_REF_DW8(phy), val);
}
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val |= COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
if (phy_info->rcomp_phy == -1)
bxt_phy_wait_grc_done(dev_priv, phy);
}
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
uint32_t val;
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val &= ~COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
val &= ~GT_DISPLAY_POWER_ON(phy);
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
}
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
bool was_enabled;
lockdep_assert_held(&dev_priv->power_domains.lock);
if (rcomp_phy != -1) {
was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
/*
* We need to copy the GRC calibration value from rcomp_phy,
* so make sure it's powered up.
*/
if (!was_enabled)
_bxt_ddi_phy_init(dev_priv, rcomp_phy);
}
_bxt_ddi_phy_init(dev_priv, phy);
if (rcomp_phy != -1 && !was_enabled)
bxt_ddi_phy_uninit(dev_priv, phy_info->rcomp_phy);
}
static bool __printf(6, 7)
__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
i915_reg_t reg, u32 mask, u32 expected,
const char *reg_fmt, ...)
{
struct va_format vaf;
va_list args;
u32 val;
val = I915_READ(reg);
if ((val & mask) == expected)
return true;
va_start(args, reg_fmt);
vaf.fmt = reg_fmt;
vaf.va = &args;
DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
"current %08x, expected %08x (mask %08x)\n",
phy, &vaf, reg.reg, val, (val & ~mask) | expected,
mask);
va_end(args);
return false;
}
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
uint32_t mask;
bool ok;
#define _CHK(reg, mask, exp, fmt, ...) \
__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
## __VA_ARGS__)
if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
return false;
ok = true;
/* PLL Rcomp code offset */
ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
"BXT_PORT_CL1CM_DW9(%d)", phy);
ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
"BXT_PORT_CL1CM_DW10(%d)", phy);
/* Power gating */
mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
"BXT_PORT_CL1CM_DW28(%d)", phy);
if (phy_info->dual_channel)
ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
"BXT_PORT_CL2CM_DW6(%d)", phy);
if (phy_info->rcomp_phy != -1) {
u32 grc_code = dev_priv->bxt_phy_grc;
grc_code = grc_code << GRC_CODE_FAST_SHIFT |
grc_code << GRC_CODE_SLOW_SHIFT |
grc_code;
mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
GRC_CODE_NOM_MASK;
ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
"BXT_PORT_REF_DW6(%d)", phy);
mask = GRC_DIS | GRC_RDY_OVRD;
ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
"BXT_PORT_REF_DW8(%d)", phy);
}
return ok;
#undef _CHK
}
uint8_t
bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
uint8_t lane_count)
{
switch (lane_count) {
case 1:
return 0;
case 2:
return BIT(2) | BIT(0);
case 4:
return BIT(3) | BIT(2) | BIT(0);
default:
MISSING_CASE(lane_count);
return 0;
}
}
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
uint8_t lane_lat_optim_mask)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
enum port port = dport->port;
enum dpio_phy phy;
enum dpio_channel ch;
int lane;
bxt_port_to_phy_channel(port, &phy, &ch);
for (lane = 0; lane < 4; lane++) {
u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
/*
* Note that on CHV this flag is called UPAR, but has
* the same function.
*/
val &= ~LATENCY_OPTIM;
if (lane_lat_optim_mask & BIT(lane))
val |= LATENCY_OPTIM;
I915_WRITE(BXT_PORT_TX_DW14_LN(phy, ch, lane), val);
}
}
uint8_t
bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
enum port port = dport->port;
enum dpio_phy phy;
enum dpio_channel ch;
int lane;
uint8_t mask;
bxt_port_to_phy_channel(port, &phy, &ch);
mask = 0;
for (lane = 0; lane < 4; lane++) {
u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
if (val & LATENCY_OPTIM)
mask |= BIT(lane);
}
return mask;
}
void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale)

View file

@ -1371,6 +1371,10 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
{
uint32_t temp;
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
enum dpio_phy phy;
enum dpio_channel ch;
bxt_port_to_phy_channel(port, &phy, &ch);
/* Non-SSC reference */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
@ -1378,72 +1382,72 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
/* Disable 10 bit clock */
temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Write P1 & P2 */
temp = I915_READ(BXT_PORT_PLL_EBB_0(port));
temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
temp |= pll->config.hw_state.ebb0;
I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp);
I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
/* Write M2 integer */
temp = I915_READ(BXT_PORT_PLL(port, 0));
temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
temp &= ~PORT_PLL_M2_MASK;
temp |= pll->config.hw_state.pll0;
I915_WRITE(BXT_PORT_PLL(port, 0), temp);
I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
/* Write N */
temp = I915_READ(BXT_PORT_PLL(port, 1));
temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
temp &= ~PORT_PLL_N_MASK;
temp |= pll->config.hw_state.pll1;
I915_WRITE(BXT_PORT_PLL(port, 1), temp);
I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
/* Write M2 fraction */
temp = I915_READ(BXT_PORT_PLL(port, 2));
temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
temp &= ~PORT_PLL_M2_FRAC_MASK;
temp |= pll->config.hw_state.pll2;
I915_WRITE(BXT_PORT_PLL(port, 2), temp);
I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
/* Write M2 fraction enable */
temp = I915_READ(BXT_PORT_PLL(port, 3));
temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
temp &= ~PORT_PLL_M2_FRAC_ENABLE;
temp |= pll->config.hw_state.pll3;
I915_WRITE(BXT_PORT_PLL(port, 3), temp);
I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
/* Write coeff */
temp = I915_READ(BXT_PORT_PLL(port, 6));
temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
temp &= ~PORT_PLL_PROP_COEFF_MASK;
temp &= ~PORT_PLL_INT_COEFF_MASK;
temp &= ~PORT_PLL_GAIN_CTL_MASK;
temp |= pll->config.hw_state.pll6;
I915_WRITE(BXT_PORT_PLL(port, 6), temp);
I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
temp = I915_READ(BXT_PORT_PLL(port, 8));
temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
temp &= ~PORT_PLL_TARGET_CNT_MASK;
temp |= pll->config.hw_state.pll8;
I915_WRITE(BXT_PORT_PLL(port, 8), temp);
I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
temp = I915_READ(BXT_PORT_PLL(port, 9));
temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
temp |= pll->config.hw_state.pll9;
I915_WRITE(BXT_PORT_PLL(port, 9), temp);
I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
temp = I915_READ(BXT_PORT_PLL(port, 10));
temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
temp &= ~PORT_PLL_DCO_AMP_MASK;
temp |= pll->config.hw_state.pll10;
I915_WRITE(BXT_PORT_PLL(port, 10), temp);
I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
/* Recalibrate with new settings */
temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
temp |= PORT_PLL_RECALIBRATE;
I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
temp |= pll->config.hw_state.ebb4;
I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
@ -1459,11 +1463,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
*/
temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
temp &= ~LANE_STAGGER_MASK;
temp &= ~LANESTAGGER_STRAP_OVRD;
temp |= pll->config.hw_state.pcsdw12;
I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp);
I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
@ -1485,6 +1489,10 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
uint32_t val;
bool ret;
enum dpio_phy phy;
enum dpio_channel ch;
bxt_port_to_phy_channel(port, &phy, &ch);
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
@ -1495,36 +1503,36 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!(val & PORT_PLL_ENABLE))
goto out;
hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
hw_state->pll0 &= PORT_PLL_M2_MASK;
hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
hw_state->pll1 &= PORT_PLL_N_MASK;
hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
PORT_PLL_INT_COEFF_MASK |
PORT_PLL_GAIN_CTL_MASK;
hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
PORT_PLL_DCO_AMP_MASK;
@ -1533,11 +1541,11 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
* can read only lane registers. We configure all lanes the same way, so
* here just read out lanes 0/1 and output a note if lanes 2/3 differ.
*/
hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
hw_state->pcsdw12,
I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
ret = true;

View file

@ -365,6 +365,8 @@ struct intel_atomic_state {
/* Gen9+ only */
struct skl_wm_values wm_results;
struct i915_sw_fence commit_ready;
};
struct intel_plane_state {
@ -401,9 +403,6 @@ struct intel_plane_state {
int scaler_id;
struct drm_intel_sprite_colorkey ckey;
/* async flip related structures */
struct drm_i915_gem_request *wait_req;
};
struct intel_initial_plane_config {
@ -501,14 +500,6 @@ struct intel_crtc_wm_state {
/* gen9+ only needs 1-step wm programming */
struct skl_pipe_wm optimal;
struct skl_ddb_entry ddb;
/* cached plane data rate */
unsigned plane_data_rate[I915_MAX_PLANES];
unsigned plane_y_data_rate[I915_MAX_PLANES];
/* minimum block allocation */
uint16_t minimum_blocks[I915_MAX_PLANES];
uint16_t minimum_y_blocks[I915_MAX_PLANES];
} skl;
};
@ -731,7 +722,6 @@ struct intel_crtc {
/* watermarks currently being used */
union {
struct intel_pipe_wm ilk;
struct skl_pipe_wm skl;
} active;
/* allow CxSR on this pipe */
@ -883,6 +873,14 @@ enum link_m_n_set {
M2_N2
};
struct intel_dp_desc {
u8 oui[3];
u8 device_id[6];
u8 hw_rev;
u8 sw_major_rev;
u8 sw_minor_rev;
} __packed;
struct intel_dp {
i915_reg_t output_reg;
i915_reg_t aux_ch_ctl_reg;
@ -905,6 +903,8 @@ struct intel_dp {
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
uint8_t num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
/* sink or branch descriptor */
struct intel_dp_desc desc;
struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
@ -964,7 +964,7 @@ struct intel_dp {
struct intel_lspcon {
bool active;
enum drm_lspcon_mode mode;
struct drm_dp_aux *aux;
bool desc_valid;
};
struct intel_digital_port {
@ -1028,17 +1028,15 @@ vlv_pipe_to_channel(enum pipe pipe)
}
}
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
static inline struct intel_crtc *
intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
return dev_priv->pipe_to_crtc_mapping[pipe];
}
static inline struct drm_crtc *
intel_get_crtc_for_plane(struct drm_device *dev, int plane)
static inline struct intel_crtc *
intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum plane plane)
{
struct drm_i915_private *dev_priv = to_i915(dev);
return dev_priv->plane_to_crtc_mapping[plane];
}
@ -1098,15 +1096,6 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
/*
* Returns the number of planes for this pipe, ie the number of sprites + 1
* (primary plane). This doesn't count the cursor plane then.
*/
static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
{
return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
}
/* intel_fifo_underrun.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
@ -1123,6 +1112,9 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 mask);
void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
@ -1145,6 +1137,9 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask);
void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
@ -1247,18 +1242,17 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
(1 << INTEL_OUTPUT_EDP));
}
static inline void
intel_wait_for_vblank(struct drm_device *dev, int pipe)
intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
drm_wait_one_vblank(dev, pipe);
drm_wait_one_vblank(&dev_priv->drm, pipe);
}
static inline void
intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
{
const struct intel_crtc *crtc =
to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc->active)
intel_wait_for_vblank(dev, pipe);
intel_wait_for_vblank(dev_priv, pipe);
}
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
@ -1305,9 +1299,9 @@ unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
/* modesetting asserts */
@ -1335,12 +1329,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void bxt_init_cdclk(struct drm_i915_private *dev_priv);
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
@ -1358,7 +1346,7 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
bool intel_crtc_active(struct drm_crtc *crtc);
bool intel_crtc_active(struct intel_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
enum intel_display_power_domain
@ -1451,6 +1439,11 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
return ~((1 << lane_count) - 1) & 0xf;
}
bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
bool __intel_dp_read_desc(struct intel_dp *intel_dp,
struct intel_dp_desc *desc);
bool intel_dp_read_desc(struct intel_dp *intel_dp);
/* intel_dp_aux_backlight.c */
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
@ -1655,23 +1648,6 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
}
static inline int
assert_rpm_atomic_begin(struct drm_i915_private *dev_priv)
{
int seq = atomic_read(&dev_priv->pm.atomic_seq);
assert_rpm_wakelock_held(dev_priv);
return seq;
}
static inline void
assert_rpm_atomic_end(struct drm_i915_private *dev_priv, int begin_seq)
{
WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq,
"HW access outside of RPM atomic section\n");
}
/**
* disable_rpm_wakeref_asserts - disable the RPM assert checks
* @dev_priv: i915 device instance
@ -1727,11 +1703,11 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
void intel_suspend_hw(struct drm_device *dev);
void intel_init_clock_gating(struct drm_i915_private *dev_priv);
void intel_suspend_hw(struct drm_i915_private *dev_priv);
int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_init_pm(struct drm_device *dev);
void intel_update_watermarks(struct intel_crtc *crtc);
void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@ -1790,7 +1766,8 @@ bool intel_sdvo_init(struct drm_device *dev,
/* intel_sprite.c */
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_pipe_update_start(struct intel_crtc *crtc);

View file

@ -393,12 +393,12 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
* its timings to get how the BIOS set up the panel.
*/
if (dvo_val & DVO_ENABLE) {
struct drm_crtc *crtc;
struct intel_crtc *crtc;
int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
crtc = intel_get_crtc_for_pipe(dev, pipe);
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc) {
mode = intel_crtc_mode_get(dev, crtc);
mode = intel_crtc_mode_get(dev, &crtc->base);
if (mode) {
mode->type |= DRM_MODE_TYPE_PREFERRED;
if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)

View file

@ -174,7 +174,7 @@ cleanup:
return ret;
}
void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
struct drm_i915_private *dev_priv = engine->i915;
@ -204,13 +204,13 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
I915_NUM_ENGINES * gen8_semaphore_seqno_size);
kunmap(page);
}
memset(engine->semaphore.sync_seqno, 0,
sizeof(engine->semaphore.sync_seqno));
intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
engine->last_submitted_seqno = seqno;
GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
engine->timeline->last_submitted_seqno = seqno;
engine->hangcheck.seqno = seqno;
@ -220,15 +220,9 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
intel_engine_wakeup(engine);
}
void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
}
static void intel_engine_init_requests(struct intel_engine_cs *engine)
{
init_request_active(&engine->last_request, NULL);
INIT_LIST_HEAD(&engine->request_list);
engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}
/**
@ -245,9 +239,7 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
INIT_LIST_HEAD(&engine->execlist_queue);
spin_lock_init(&engine->execlist_lock);
engine->fence_context = dma_fence_context_alloc(1);
intel_engine_init_requests(engine);
intel_engine_init_timeline(engine);
intel_engine_init_hangcheck(engine);
i915_gem_batch_pool_init(engine, &engine->batch_pool);
@ -264,7 +256,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
if (!obj)
obj = i915_gem_object_create(&engine->i915->drm, size);
obj = i915_gem_object_create_internal(engine->i915, size);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate scratch page\n");
return PTR_ERR(obj);
@ -314,6 +306,10 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
if (ret)
return ret;
ret = i915_gem_render_state_init(engine);
if (ret)
return ret;
return 0;
}
@ -328,6 +324,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
intel_engine_cleanup_scratch(engine);
i915_gem_render_state_fini(engine);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);

View file

@ -1306,7 +1306,7 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
return;
for_each_intel_crtc(&dev_priv->drm, crtc)
if (intel_crtc_active(&crtc->base) &&
if (intel_crtc_active(crtc) &&
to_intel_plane_state(crtc->base.primary->state)->base.visible)
dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}

View file

@ -57,7 +57,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
assert_spin_locked(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc->cpu_fifo_underrun_disabled)
return false;
@ -75,7 +75,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
assert_spin_locked(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc->pch_fifo_underrun_disabled)
return false;
@ -245,14 +245,13 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
bool old;
assert_spin_locked(&dev_priv->irq_lock);
old = !intel_crtc->cpu_fifo_underrun_disabled;
intel_crtc->cpu_fifo_underrun_disabled = !enable;
old = !crtc->cpu_fifo_underrun_disabled;
crtc->cpu_fifo_underrun_disabled = !enable;
if (HAS_GMCH_DISPLAY(dev_priv))
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
@ -314,8 +313,8 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder,
bool enable)
{
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc *crtc =
intel_get_crtc_for_pipe(dev_priv, (enum pipe) pch_transcoder);
unsigned long flags;
bool old;
@ -330,8 +329,8 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
spin_lock_irqsave(&dev_priv->irq_lock, flags);
old = !intel_crtc->pch_fifo_underrun_disabled;
intel_crtc->pch_fifo_underrun_disabled = !enable;
old = !crtc->pch_fifo_underrun_disabled;
crtc->pch_fifo_underrun_disabled = !enable;
if (HAS_PCH_IBX(dev_priv))
ibx_set_fifo_underrun_reporting(&dev_priv->drm,
@ -358,7 +357,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
/* We may be called too early in init, thanks BIOS! */
if (crtc == NULL)
@ -366,7 +365,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
/* GMCH can't disable fifo underruns, filter them. */
if (HAS_GMCH_DISPLAY(dev_priv) &&
to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
crtc->cpu_fifo_underrun_disabled)
return;
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))

View file

@ -64,7 +64,7 @@ struct drm_i915_gem_request;
*/
struct i915_guc_client {
struct i915_vma *vma;
void *client_base; /* first page (only) of above */
void *vaddr;
struct i915_gem_context *owner;
struct intel_guc *guc;
@ -123,10 +123,28 @@ struct intel_guc_fw {
uint32_t ucode_offset;
};
struct intel_guc_log {
uint32_t flags;
struct i915_vma *vma;
void *buf_addr;
struct workqueue_struct *flush_wq;
struct work_struct flush_work;
struct rchan *relay_chan;
/* logging related stats */
u32 capture_miss_count;
u32 flush_interrupt_count;
u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
u32 flush_count[GUC_MAX_LOG_BUFFER];
};
struct intel_guc {
struct intel_guc_fw guc_fw;
uint32_t log_flags;
struct i915_vma *log_vma;
struct intel_guc_log log;
/* GuC2Host interrupt related state */
bool interrupts_enabled;
struct i915_vma *ads_vma;
struct i915_vma *ctx_pool_vma;
@ -146,6 +164,9 @@ struct intel_guc {
uint64_t submissions[I915_NUM_ENGINES];
uint32_t last_seqno[I915_NUM_ENGINES];
/* To serialize the Host2GuC actions */
struct mutex action_lock;
};
/* intel_guc_loader.c */
@ -163,5 +184,10 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
void i915_guc_capture_logs(struct drm_i915_private *dev_priv);
void i915_guc_flush_logs(struct drm_i915_private *dev_priv);
void i915_guc_register(struct drm_i915_private *dev_priv);
void i915_guc_unregister(struct drm_i915_private *dev_priv);
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
#endif

View file

@ -104,9 +104,9 @@
#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3)
#define GUC_LOG_CRASH_PAGES 1
#define GUC_LOG_CRASH_SHIFT 4
#define GUC_LOG_DPC_PAGES 3
#define GUC_LOG_DPC_PAGES 7
#define GUC_LOG_DPC_SHIFT 6
#define GUC_LOG_ISR_PAGES 3
#define GUC_LOG_ISR_PAGES 7
#define GUC_LOG_ISR_SHIFT 9
#define GUC_LOG_BUF_ADDR_SHIFT 12
@ -419,15 +419,87 @@ struct guc_ads {
u32 reserved2[4];
} __packed;
/* GuC logging structures */
enum guc_log_buffer_type {
GUC_ISR_LOG_BUFFER,
GUC_DPC_LOG_BUFFER,
GUC_CRASH_DUMP_LOG_BUFFER,
GUC_MAX_LOG_BUFFER
};
/**
* DOC: GuC Log buffer Layout
*
* Page0 +-------------------------------+
* | ISR state header (32 bytes) |
* | DPC state header |
* | Crash dump state header |
* Page1 +-------------------------------+
* | ISR logs |
* Page9 +-------------------------------+
* | DPC logs |
* Page17 +-------------------------------+
* | Crash Dump logs |
* +-------------------------------+
*
* Below state structure is used for coordination of retrieval of GuC firmware
* logs. Separate state is maintained for each log buffer type.
* read_ptr points to the location where i915 read last in log buffer and
* is read only for GuC firmware. write_ptr is incremented by GuC with number
* of bytes written for each log entry and is read only for i915.
* When any type of log buffer becomes half full, GuC sends a flush interrupt.
* GuC firmware expects that while it is writing to the 2nd half of the
* buffer, the first half gets consumed by the Host, which then sends a
* flush-completed acknowledgment, so that GuC never overwrites unread
* data and loses logs. So when the buffer gets half filled and i915 has
* requested an interrupt, GuC sets the flush_to_file field, copies
* write_ptr into sampled_write_ptr and raises the interrupt.
* On receiving the interrupt i915 should read the buffer, clear the
* flush_to_file field and update read_ptr with the value of
* sampled_write_ptr, before sending an acknowledgment to GuC. marker and
* version fields are for internal
* usage of GuC and opaque to i915. buffer_full_cnt field is incremented every
* time GuC detects the log buffer overflow.
*/
struct guc_log_buffer_state {
u32 marker[2];
u32 read_ptr;
u32 write_ptr;
u32 size;
u32 sampled_write_ptr;
union {
struct {
u32 flush_to_file:1;
u32 buffer_full_cnt:4;
u32 reserved:27;
};
u32 flags;
};
u32 version;
} __packed;
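/*
 * Illustrative sketch (editor's addition, not part of this patch): the
 * host-side half of the flush handshake described above. The consumer
 * function and the copy-out step are hypothetical; only the
 * guc_log_buffer_state fields defined above are assumed, and the final
 * acknowledgment would be sent separately via
 * HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE.
 */
static void guc_log_buffer_consume(struct guc_log_buffer_state *state)
{
	u32 write_ptr = state->sampled_write_ptr;

	/* Copy out the bytes between read_ptr and sampled_write_ptr here
	 * (omitted), then tell the firmware how far the host has read. */
	state->read_ptr = write_ptr;

	/* Clear the flush request before acknowledging so GuC knows it
	 * may keep filling the other half of the buffer. */
	state->flush_to_file = 0;
}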
union guc_log_control {
struct {
u32 logging_enabled:1;
u32 reserved1:3;
u32 verbosity:4;
u32 reserved2:24;
};
u32 value;
} __packed;
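/*
 * Illustrative sketch (editor's addition, not part of this patch):
 * packing a control word in the guc_log_control layout above, e.g. for
 * the value handed to i915_guc_log_control(). The helper name and the
 * exact verbosity range are assumptions.
 */
static u32 guc_log_control_pack(bool enable, u32 verbosity)
{
	union guc_log_control ctl = { .value = 0 };

	ctl.logging_enabled = enable;
	ctl.verbosity = verbosity;	/* assumed 0..3, min..max */

	return ctl.value;
}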
/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
enum host2guc_action {
HOST2GUC_ACTION_DEFAULT = 0x0,
HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
HOST2GUC_ACTION_ENTER_S_STATE = 0x501,
HOST2GUC_ACTION_EXIT_S_STATE = 0x502,
HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
HOST2GUC_ACTION_LIMIT
};
@ -449,4 +521,10 @@ enum guc2host_status {
GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000)
};
/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
enum guc2host_message {
GUC2HOST_MSG_CRASH_DUMP_POSTED = (1 << 1),
GUC2HOST_MSG_FLUSH_LOG_BUFFER = (1 << 3)
};
#endif

View file

@ -211,11 +211,13 @@ static void guc_params_init(struct drm_i915_private *dev_priv)
params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
GUC_CTL_VCS2_ENABLED;
params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
if (i915.guc_log_level >= 0) {
params[GUC_CTL_LOG_PARAMS] = guc->log_flags;
params[GUC_CTL_DEBUG] =
i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
}
} else
params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
if (guc->ads_vma) {
u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
@ -483,6 +485,7 @@ int intel_guc_setup(struct drm_device *dev)
}
guc_interrupts_release(dev_priv);
gen9_reset_guc_interrupts(dev_priv);
guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
@ -527,6 +530,9 @@ int intel_guc_setup(struct drm_device *dev)
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
if (i915.enable_guc_submission) {
if (i915.guc_log_level >= 0)
gen9_enable_guc_interrupts(dev_priv);
err = i915_guc_submission_enable(dev_priv);
if (err)
goto fail;

View file

@ -0,0 +1,450 @@
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "i915_drv.h"
static bool
ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
{
if (INTEL_GEN(engine->i915) >= 8) {
return (ipehr >> 23) == 0x1c;
} else {
ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER);
}
}
static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
u64 offset)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_engine_cs *signaller;
enum intel_engine_id id;
if (INTEL_GEN(dev_priv) >= 8) {
for_each_engine(signaller, dev_priv, id) {
if (engine == signaller)
continue;
if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
return signaller;
}
} else {
u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
for_each_engine(signaller, dev_priv, id) {
if (engine == signaller)
continue;
if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
return signaller;
}
}
DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
engine->name, ipehr, offset);
return ERR_PTR(-ENODEV);
}
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
{
struct drm_i915_private *dev_priv = engine->i915;
void __iomem *vaddr;
u32 cmd, ipehr, head;
u64 offset = 0;
int i, backwards;
/*
* This function does not support execlist mode - any attempt to
* proceed further into this function will result in a kernel panic
* when dereferencing ring->buffer, which is not set up in execlist
* mode.
*
* The correct way of doing it would be to derive the currently
* executing ring buffer from the current context, which is derived
* from the currently running request. Unfortunately, to get the
* current request we would have to grab the struct_mutex before doing
* anything else, which would be ill-advised since some other thread
* might have grabbed it already and managed to hang itself, causing
* the hang checker to deadlock.
*
* Therefore, this function does not support execlist mode in its
* current form. Just return NULL and move on.
*/
if (engine->buffer == NULL)
return NULL;
ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
if (!ipehr_is_semaphore_wait(engine, ipehr))
return NULL;
/*
* HEAD is likely pointing to the dword after the actual command,
* so scan backwards until we find the MBOX. But limit it to just 3
* or 4 dwords depending on the semaphore wait command size.
* Note that we don't care about ACTHD here since that might
* point at a batch, and semaphores are always emitted into the
* ringbuffer itself.
*/
head = I915_READ_HEAD(engine) & HEAD_ADDR;
backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
vaddr = (void __iomem *)engine->buffer->vaddr;
for (i = backwards; i; --i) {
/*
* Be paranoid and presume the hw has gone off into the wild -
* our ring is smaller than what the hardware (and hence
* HEAD_ADDR) allows. Also handles wrap-around.
*/
head &= engine->buffer->size - 1;
/* This here seems to blow up */
cmd = ioread32(vaddr + head);
if (cmd == ipehr)
break;
head -= 4;
}
if (!i)
return NULL;
*seqno = ioread32(vaddr + head + 4) + 1;
if (INTEL_GEN(dev_priv) >= 8) {
offset = ioread32(vaddr + head + 12);
offset <<= 32;
offset |= ioread32(vaddr + head + 8);
}
return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
}
static int semaphore_passed(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_engine_cs *signaller;
u32 seqno;
engine->hangcheck.deadlock++;
signaller = semaphore_waits_for(engine, &seqno);
if (signaller == NULL)
return -1;
if (IS_ERR(signaller))
return 0;
/* Prevent pathological recursion due to driver bugs */
if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
return -1;
if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
return 1;
/* cursory check for an unkickable deadlock */
if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
semaphore_passed(signaller) < 0)
return -1;
return 0;
}
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id)
engine->hangcheck.deadlock = 0;
}
static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
{
u32 tmp = current_instdone | *old_instdone;
bool unchanged;
unchanged = tmp == *old_instdone;
*old_instdone |= tmp;
return unchanged;
}
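/*
 * Worked example of the accumulation above (illustrative bit values): with
 * *old_instdone == 0b0011 and current_instdone == 0b0101, tmp becomes 0b0111,
 * which differs from *old_instdone, so the call returns false (a new bit
 * appeared, i.e. progress) and the accumulator is updated to 0b0111. Only when
 * a sample sets no bits beyond those already accumulated does the call return
 * true, which subunits_stuck() below treats as "no progress".
 */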
static bool subunits_stuck(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_instdone instdone;
struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
bool stuck;
int slice;
int subslice;
if (engine->id != RCS)
return true;
intel_engine_get_instdone(engine, &instdone);
/* There might be unstable subunit states even when
* the actual head is not moving. Filter out the unstable ones by
* accumulating the undone -> done transitions and only
* consider those as progress.
*/
stuck = instdone_unchanged(instdone.instdone,
&accu_instdone->instdone);
stuck &= instdone_unchanged(instdone.slice_common,
&accu_instdone->slice_common);
for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
&accu_instdone->sampler[slice][subslice]);
stuck &= instdone_unchanged(instdone.row[slice][subslice],
&accu_instdone->row[slice][subslice]);
}
return stuck;
}
static enum intel_engine_hangcheck_action
head_stuck(struct intel_engine_cs *engine, u64 acthd)
{
if (acthd != engine->hangcheck.acthd) {
/* Clear subunit states on head movement */
memset(&engine->hangcheck.instdone, 0,
sizeof(engine->hangcheck.instdone));
return HANGCHECK_ACTIVE;
}
if (!subunits_stuck(engine))
return HANGCHECK_ACTIVE;
return HANGCHECK_HUNG;
}
static enum intel_engine_hangcheck_action
engine_stuck(struct intel_engine_cs *engine, u64 acthd)
{
struct drm_i915_private *dev_priv = engine->i915;
enum intel_engine_hangcheck_action ha;
u32 tmp;
ha = head_stuck(engine, acthd);
if (ha != HANGCHECK_HUNG)
return ha;
if (IS_GEN2(dev_priv))
return HANGCHECK_HUNG;
/* Is the chip hanging on a WAIT_FOR_EVENT?
* If so we can simply poke the RB_WAIT bit
* and break the hang. This should work on
* all but the second generation chipsets.
*/
tmp = I915_READ_CTL(engine);
if (tmp & RING_WAIT) {
i915_handle_error(dev_priv, 0,
"Kicking stuck wait on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
return HANGCHECK_KICK;
}
if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
switch (semaphore_passed(engine)) {
default:
return HANGCHECK_HUNG;
case 1:
i915_handle_error(dev_priv, 0,
"Kicking stuck semaphore on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
return HANGCHECK_KICK;
case 0:
return HANGCHECK_WAIT;
}
}
return HANGCHECK_HUNG;
}
/*
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. We keep track of per-ring seqno progress and
* if there is no progress, the hangcheck score for that ring is increased.
* Further, acthd is inspected to see if the ring is stuck. If it is stuck,
* we kick the ring. If we see no progress on three subsequent calls
* we assume the chip is wedged and try to fix it by resetting the chip.
*/
static void i915_hangcheck_elapsed(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int hung = 0, stuck = 0;
int busy_count = 0;
#define BUSY 1
#define KICK 5
#define HUNG 20
#define ACTIVE_DECAY 15
if (!i915.enable_hangcheck)
return;
if (!READ_ONCE(dev_priv->gt.awake))
return;
/* As enabling the GPU requires fairly extensive mmio access,
* periodically arm the mmio checker to see if we are triggering
* any invalid access.
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
for_each_engine(engine, dev_priv, id) {
bool busy = intel_engine_has_waiter(engine);
u64 acthd;
u32 seqno;
u32 submit;
semaphore_clear_deadlocks(dev_priv);
/* We don't strictly need an irq-barrier here, as we are not
* serving an interrupt request, but be paranoid in case the
* barrier has side-effects (such as preventing a broken
* cacheline snoop) and so be sure that we can see the seqno
* advance. If the seqno should stick, due to a stale
* cacheline, we would erroneously declare the GPU hung.
*/
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
acthd = intel_engine_get_active_head(engine);
seqno = intel_engine_get_seqno(engine);
submit = intel_engine_last_submit(engine);
if (engine->hangcheck.seqno == seqno) {
if (i915_seqno_passed(seqno, submit)) {
engine->hangcheck.action = HANGCHECK_IDLE;
} else {
/* We always increment the hangcheck score
* if the engine is busy and still processing
* the same request, so that no single request
* can run indefinitely (such as a chain of
* batches). The only time we do not increment
* the hangcheck score on this ring is when this
* engine is in a legitimate wait for another
* engine. In that case the waiting engine is a
* victim and we want to be sure we catch the
* right culprit. Then every time we kick
* the ring, we add a small increment to the
* score so that we can catch a batch that is
* being repeatedly kicked and so responsible
* for stalling the machine.
*/
engine->hangcheck.action =
engine_stuck(engine, acthd);
switch (engine->hangcheck.action) {
case HANGCHECK_IDLE:
case HANGCHECK_WAIT:
break;
case HANGCHECK_ACTIVE:
engine->hangcheck.score += BUSY;
break;
case HANGCHECK_KICK:
engine->hangcheck.score += KICK;
break;
case HANGCHECK_HUNG:
engine->hangcheck.score += HUNG;
break;
}
}
if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
hung |= intel_engine_flag(engine);
if (engine->hangcheck.action != HANGCHECK_HUNG)
stuck |= intel_engine_flag(engine);
}
} else {
engine->hangcheck.action = HANGCHECK_ACTIVE;
/* Gradually reduce the count so that we catch DoS
* attempts across multiple batches.
*/
if (engine->hangcheck.score > 0)
engine->hangcheck.score -= ACTIVE_DECAY;
if (engine->hangcheck.score < 0)
engine->hangcheck.score = 0;
/* Clear head and subunit states on seqno movement */
acthd = 0;
memset(&engine->hangcheck.instdone, 0,
sizeof(engine->hangcheck.instdone));
}
engine->hangcheck.seqno = seqno;
engine->hangcheck.acthd = acthd;
busy_count += busy;
}
if (hung) {
char msg[80];
unsigned int tmp;
int len;
/* If some rings hung but others were still busy, only
* blame the hanging rings in the synopsis.
*/
if (stuck != hung)
hung &= ~stuck;
len = scnprintf(msg, sizeof(msg),
"%s on ", stuck == hung ? "No progress" : "Hang");
for_each_engine_masked(engine, dev_priv, hung, tmp)
len += scnprintf(msg + len, sizeof(msg) - len,
"%s, ", engine->name);
msg[len-2] = '\0';
return i915_handle_error(dev_priv, hung, msg);
}
/* Reset timer in case GPU hangs without another request being added */
if (busy_count)
i915_queue_hangcheck(dev_priv);
}
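/*
 * Scoring illustration (values from the macros above; the hung threshold is
 * HANGCHECK_SCORE_RING_HUNG, defined elsewhere in the driver): an engine whose
 * head has genuinely stopped accrues HUNG (20) per sample and is reported
 * after a couple of periods; one that only needs its wait kicked accrues
 * KICK (5) and takes longer; a busy-but-advancing engine accrues BUSY (1) and
 * is forgiven quickly, since any seqno progress subtracts ACTIVE_DECAY (15)
 * down to a floor of zero.
 */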
void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
{
memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
}
void intel_hangcheck_init(struct drm_i915_private *i915)
{
INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work,
i915_hangcheck_elapsed);
}


@ -1164,7 +1164,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}


@ -365,7 +365,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
u32 *reg_state = ce->lrc_reg_state;
reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
reg_state[CTX_RING_TAIL+1] = rq->tail;
/* True 32b PPGTT with dynamic page allocation: update PDP
* registers and point the unallocated PDPs to scratch page.
@ -440,7 +440,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (last)
/* WaIdleLiteRestore:bdw,skl
* Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
* as we resubmit the request. See gen8_emit_request()
* as we resubmit the request. See gen8_emit_breadcrumb()
* for where we prepare the padding after the end of the
* request.
*/
@ -522,6 +522,28 @@ static bool execlists_elsp_idle(struct intel_engine_cs *engine)
return !engine->execlist_port[0].request;
}
/**
* intel_execlists_idle() - Determine if all engine submission ports are idle
* @dev_priv: i915 device private
*
* Return true if there are no requests pending on any of the submission ports
* of any engine.
*/
bool intel_execlists_idle(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
if (!i915.enable_execlists)
return true;
for_each_engine(engine, dev_priv, id)
if (!execlists_elsp_idle(engine))
return false;
return true;
}
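/*
 * Hypothetical caller, sketched here only to show the intended use of the
 * helper above (the real call sites live in the suspend/idle paths and are
 * not part of this hunk): warn if we are about to idle the GPU while a
 * request is still in flight on an ELSP.
 */
static void assert_elsp_idle_for_suspend(struct drm_i915_private *dev_priv)
{
	WARN_ON(!intel_execlists_idle(dev_priv));
}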
static bool execlists_elsp_ready(struct intel_engine_cs *engine)
{
int port;
@ -599,6 +621,15 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
spin_lock_irqsave(&engine->execlist_lock, flags);
/* We keep the previous context alive until we retire the following
* request. This ensures that the context object is still pinned
* for any residual writes the HW makes into it on the context switch
* into the next object following the breadcrumb. Otherwise, we may
* retire the context too early.
*/
request->previous_context = engine->last_context;
engine->last_context = request->ctx;
list_add_tail(&request->execlist_link, &engine->execlist_queue);
if (execlists_elsp_idle(engine))
tasklet_hi_schedule(&engine->irq_tasklet);
@ -671,46 +702,6 @@ err_unpin:
return ret;
}
/*
* intel_logical_ring_advance() - advance the tail and prepare for submission
* @request: Request to advance the logical ringbuffer of.
*
* The tail is updated in our logical ringbuffer struct, not in the actual context. What
* really happens during submission is that the context and current tail will be placed
* on a queue waiting for the ELSP to be ready to accept a new context submission. At that
* point, the tail *inside* the context is updated and the ELSP written to.
*/
static int
intel_logical_ring_advance(struct drm_i915_gem_request *request)
{
struct intel_ring *ring = request->ring;
struct intel_engine_cs *engine = request->engine;
intel_ring_advance(ring);
request->tail = ring->tail;
/*
* Here we add two extra NOOPs as padding to avoid
* lite restore of a context with HEAD==TAIL.
*
* Caller must reserve WA_TAIL_DWORDS for us!
*/
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
request->wa_tail = ring->tail;
/* We keep the previous context alive until we retire the following
* request. This ensures that the context object is still pinned
* for any residual writes the HW makes into it on the context switch
* into the next object following the breadcrumb. Otherwise, we may
* retire the context too early.
*/
request->previous_context = engine->last_context;
engine->last_context = request->ctx;
return 0;
}
static int intel_lr_context_pin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
@ -744,7 +735,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
i915_ggtt_offset(ce->ring->vma);
ce->state->obj->dirty = true;
ce->state->obj->mm.dirty = true;
/* Invalidate GuC TLB. */
if (i915.enable_guc_submission) {
@ -1566,39 +1557,35 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
static int gen8_emit_request(struct drm_i915_gem_request *request)
static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *out)
{
struct intel_ring *ring = request->ring;
int ret;
ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
if (ret)
return ret;
*out++ = MI_NOOP;
*out++ = MI_NOOP;
request->wa_tail = intel_ring_offset(request->ring, out);
}
static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request,
u32 *out)
{
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
intel_ring_emit(ring,
intel_hws_seqno_address(request->engine) |
MI_FLUSH_DW_USE_GTT);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, request->fence.seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_emit(ring, MI_NOOP);
return intel_logical_ring_advance(request);
*out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
*out++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
*out++ = 0;
*out++ = request->global_seqno;
*out++ = MI_USER_INTERRUPT;
*out++ = MI_NOOP;
request->tail = intel_ring_offset(request->ring, out);
gen8_emit_wa_tail(request, out);
}
static int gen8_emit_request_render(struct drm_i915_gem_request *request)
static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
u32 *out)
{
struct intel_ring *ring = request->ring;
int ret;
ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
if (ret)
return ret;
/* We're using qword write, seqno should be aligned to 8 bytes. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
@ -1606,21 +1593,24 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
* need a prior CS_STALL, which is emitted by the flush
* following the batch.
*/
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(ring,
(PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE));
intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
intel_ring_emit(ring, 0);
intel_ring_emit(ring, i915_gem_request_get_seqno(request));
*out++ = GFX_OP_PIPE_CONTROL(6);
*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE);
*out++ = intel_hws_seqno_address(request->engine);
*out++ = 0;
*out++ = request->global_seqno;
/* We're thrashing one dword of HWS. */
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_emit(ring, MI_NOOP);
return intel_logical_ring_advance(request);
*out++ = 0;
*out++ = MI_USER_INTERRUPT;
*out++ = MI_NOOP;
request->tail = intel_ring_offset(request->ring, out);
gen8_emit_wa_tail(request, out);
}
static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
{
int ret;
@ -1637,7 +1627,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
if (ret)
DRM_ERROR("MOCS failed to program: expect performance issues.\n");
return i915_gem_render_state_init(req);
return i915_gem_render_state_emit(req);
}
/**
@ -1694,7 +1684,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->init_hw = gen8_init_common_ring;
engine->reset_hw = reset_common_ring;
engine->emit_flush = gen8_emit_flush;
engine->emit_request = gen8_emit_request;
engine->emit_breadcrumb = gen8_emit_breadcrumb;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
engine->submit_request = execlists_submit_request;
engine->irq_enable = gen8_logical_ring_enable_irq;
@ -1816,7 +1807,8 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
engine->init_hw = gen8_init_render_ring;
engine->init_context = gen8_init_rcs_context;
engine->emit_flush = gen8_emit_flush_render;
engine->emit_request = gen8_emit_request_render;
engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
ret = intel_engine_create_scratch(engine, 4096);
if (ret)
@ -2042,7 +2034,7 @@ populate_lr_context(struct i915_gem_context *ctx,
DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
return ret;
}
ctx_obj->dirty = true;
ctx_obj->mm.dirty = true;
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
@ -2180,7 +2172,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
reg[CTX_RING_HEAD+1] = 0;
reg[CTX_RING_TAIL+1] = 0;
ce->state->obj->dirty = true;
ce->state->obj->mm.dirty = true;
i915_gem_object_unpin_map(ce->state->obj);
ce->ring->head = ce->ring->tail = 0;


@ -95,5 +95,6 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
int enable_execlists);
void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
bool intel_execlists_idle(struct drm_i915_private *dev_priv);
#endif /* _INTEL_LRC_H_ */


@ -27,10 +27,18 @@
#include <drm/drm_dp_dual_mode_helper.h>
#include "intel_drv.h"
static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
{
struct intel_digital_port *dig_port =
container_of(lspcon, struct intel_digital_port, lspcon);
return &dig_port->dp;
}
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
enum drm_lspcon_mode current_mode = DRM_LSPCON_MODE_INVALID;
struct i2c_adapter *adapter = &lspcon->aux->ddc;
struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
if (drm_lspcon_get_mode(adapter, &current_mode))
DRM_ERROR("Error reading LSPCON mode\n");
@ -45,7 +53,7 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
{
int err;
enum drm_lspcon_mode current_mode;
struct i2c_adapter *adapter = &lspcon->aux->ddc;
struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
err = drm_lspcon_get_mode(adapter, &current_mode);
if (err) {
@ -72,7 +80,7 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
static bool lspcon_probe(struct intel_lspcon *lspcon)
{
enum drm_dp_dual_mode_type adaptor_type;
struct i2c_adapter *adapter = &lspcon->aux->ddc;
struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
/* Let's probe the adaptor and check its type */
adaptor_type = drm_dp_dual_mode_detect(adapter);
@ -89,8 +97,43 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
return true;
}
static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
unsigned long start = jiffies;
if (!lspcon->desc_valid)
return;
while (1) {
struct intel_dp_desc desc;
/*
* The w/a only applies in PCON mode and we don't expect any
* AUX errors.
*/
if (!__intel_dp_read_desc(intel_dp, &desc))
return;
if (!memcmp(&intel_dp->desc, &desc, sizeof(desc))) {
DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n",
jiffies_to_msecs(jiffies - start));
return;
}
if (time_after(jiffies, start + msecs_to_jiffies(1000)))
break;
usleep_range(10000, 15000);
}
DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n");
}
void lspcon_resume(struct intel_lspcon *lspcon)
{
lspcon_resume_in_pcon_wa(lspcon);
if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON, true))
DRM_ERROR("LSPCON resume failed\n");
else
@ -111,7 +154,6 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
lspcon->active = false;
lspcon->mode = DRM_LSPCON_MODE_INVALID;
lspcon->aux = &dp->aux;
if (!lspcon_probe(lspcon)) {
DRM_ERROR("Failed to probe lspcon\n");
@ -131,6 +173,13 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
}
}
if (!intel_dp_read_dpcd(dp)) {
DRM_ERROR("LSPCON DPCD read failed\n");
return false;
}
lspcon->desc_valid = intel_dp_read_desc(dp);
DRM_DEBUG_KMS("Success: LSPCON init\n");
return true;
}


@ -985,7 +985,7 @@ void intel_lvds_init(struct drm_device *dev)
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
struct edid *edid;
struct drm_crtc *crtc;
struct intel_crtc *crtc;
i915_reg_t lvds_reg;
u32 lvds;
int pipe;
@ -1163,10 +1163,10 @@ void intel_lvds_init(struct drm_device *dev)
goto failed;
pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
crtc = intel_get_crtc_for_pipe(dev, pipe);
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc && (lvds & LVDS_PORT_EN)) {
fixed_mode = intel_crtc_mode_get(dev, crtc);
fixed_mode = intel_crtc_mode_get(dev, &crtc->base);
if (fixed_mode) {
DRM_DEBUG_KMS("using current (BIOS) mode: ");
drm_mode_debug_printmodeline(fixed_mode);


@ -1222,7 +1222,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
i915_gem_object_put_unlocked(new_bo);
i915_gem_object_put(new_bo);
out_free:
kfree(params);
@ -1466,7 +1466,7 @@ void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
* hardware should be off already */
WARN_ON(dev_priv->overlay->active);
i915_gem_object_put_unlocked(dev_priv->overlay->reg_bo);
i915_gem_object_put(dev_priv->overlay->reg_bo);
kfree(dev_priv->overlay);
}

File diff suppressed because it is too large


@ -648,7 +648,7 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
if (ret != 0)
return ret;
ret = i915_gem_render_state_init(req);
ret = i915_gem_render_state_emit(req);
if (ret)
return ret;
@ -1213,90 +1213,62 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
i915_vma_unpin_and_release(&dev_priv->semaphore);
}
static int gen8_rcs_signal(struct drm_i915_gem_request *req)
static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
{
struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
int ret, num_rings;
num_rings = INTEL_INFO(dev_priv)->num_rings;
ret = intel_ring_begin(req, (num_rings-1) * 8);
if (ret)
return ret;
for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(ring,
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_CS_STALL);
intel_ring_emit(ring, lower_32_bits(gtt_offset));
intel_ring_emit(ring, upper_32_bits(gtt_offset));
intel_ring_emit(ring, req->fence.seqno);
intel_ring_emit(ring, 0);
intel_ring_emit(ring,
MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->hw_id));
intel_ring_emit(ring, 0);
*out++ = GFX_OP_PIPE_CONTROL(6);
*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_CS_STALL);
*out++ = lower_32_bits(gtt_offset);
*out++ = upper_32_bits(gtt_offset);
*out++ = req->global_seqno;
*out++ = 0;
*out++ = (MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->hw_id));
*out++ = 0;
}
intel_ring_advance(ring);
return 0;
return out;
}
static int gen8_xcs_signal(struct drm_i915_gem_request *req)
static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
{
struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
int ret, num_rings;
num_rings = INTEL_INFO(dev_priv)->num_rings;
ret = intel_ring_begin(req, (num_rings-1) * 6);
if (ret)
return ret;
for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
intel_ring_emit(ring,
(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
intel_ring_emit(ring,
lower_32_bits(gtt_offset) |
MI_FLUSH_DW_USE_GTT);
intel_ring_emit(ring, upper_32_bits(gtt_offset));
intel_ring_emit(ring, req->fence.seqno);
intel_ring_emit(ring,
MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->hw_id));
intel_ring_emit(ring, 0);
*out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
*out++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
*out++ = upper_32_bits(gtt_offset);
*out++ = req->global_seqno;
*out++ = (MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->hw_id));
*out++ = 0;
}
intel_ring_advance(ring);
return 0;
return out;
}
static int gen6_signal(struct drm_i915_gem_request *req)
static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
{
struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret, num_rings;
num_rings = INTEL_INFO(dev_priv)->num_rings;
ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2));
if (ret)
return ret;
int num_rings = 0;
for_each_engine(engine, dev_priv, id) {
i915_reg_t mbox_reg;
@ -1306,101 +1278,78 @@ static int gen6_signal(struct drm_i915_gem_request *req)
mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
if (i915_mmio_reg_valid(mbox_reg)) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, mbox_reg);
intel_ring_emit(ring, req->fence.seqno);
*out++ = MI_LOAD_REGISTER_IMM(1);
*out++ = i915_mmio_reg_offset(mbox_reg);
*out++ = req->global_seqno;
num_rings++;
}
}
if (num_rings & 1)
*out++ = MI_NOOP;
/* If num_dwords was rounded, make sure the tail pointer is correct */
if (num_rings % 2 == 0)
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
return out;
}
static void i9xx_submit_request(struct drm_i915_gem_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
I915_WRITE_TAIL(request->engine,
intel_ring_offset(request->ring, request->tail));
I915_WRITE_TAIL(request->engine, request->tail);
}
static int i9xx_emit_request(struct drm_i915_gem_request *req)
static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
u32 *out)
{
struct intel_ring *ring = req->ring;
int ret;
*out++ = MI_STORE_DWORD_INDEX;
*out++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
*out++ = req->global_seqno;
*out++ = MI_USER_INTERRUPT;
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, req->fence.seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
req->tail = ring->tail;
return 0;
req->tail = intel_ring_offset(req->ring, out);
}
static const int i9xx_emit_breadcrumb_sz = 4;
/**
* gen6_sema_emit_request - Update the semaphore mailbox registers
* gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
*
* @req: request to write to the ring
*
* Update the mailbox registers in the *other* rings with the current seqno.
* This acts like a signal in the canonical semaphore.
*/
static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req,
u32 *out)
{
int ret;
ret = req->engine->semaphore.signal(req);
if (ret)
return ret;
return i9xx_emit_request(req);
return i9xx_emit_breadcrumb(req,
req->engine->semaphore.signal(req, out));
}
static int gen8_render_emit_request(struct drm_i915_gem_request *req)
static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
u32 *out)
{
struct intel_engine_cs *engine = req->engine;
struct intel_ring *ring = req->ring;
int ret;
if (engine->semaphore.signal) {
ret = engine->semaphore.signal(req);
if (ret)
return ret;
}
if (engine->semaphore.signal)
out = engine->semaphore.signal(req, out);
ret = intel_ring_begin(req, 8);
if (ret)
return ret;
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
*out++ = GFX_OP_PIPE_CONTROL(6);
*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE));
intel_ring_emit(ring, intel_hws_seqno_address(engine));
intel_ring_emit(ring, 0);
intel_ring_emit(ring, i915_gem_request_get_seqno(req));
PIPE_CONTROL_QW_WRITE);
*out++ = intel_hws_seqno_address(engine);
*out++ = 0;
*out++ = req->global_seqno;
/* We're thrashing one dword of HWS. */
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
*out++ = 0;
*out++ = MI_USER_INTERRUPT;
*out++ = MI_NOOP;
req->tail = ring->tail;
return 0;
req->tail = intel_ring_offset(req->ring, out);
}
static const int gen8_render_emit_breadcrumb_sz = 8;
/**
* intel_ring_sync - sync the waiter to the signaller on seqno
*
@ -1427,7 +1376,7 @@ gen8_ring_sync_to(struct drm_i915_gem_request *req,
MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_SAD_GTE_SDD);
intel_ring_emit(ring, signal->fence.seqno);
intel_ring_emit(ring, signal->global_seqno);
intel_ring_emit(ring, lower_32_bits(offset));
intel_ring_emit(ring, upper_32_bits(offset));
intel_ring_advance(ring);
@ -1465,7 +1414,7 @@ gen6_ring_sync_to(struct drm_i915_gem_request *req,
* seqno is >= the last seqno executed. However for hardware the
* comparison is strictly greater than.
*/
intel_ring_emit(ring, signal->fence.seqno - 1);
intel_ring_emit(ring, signal->global_seqno - 1);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@ -1608,7 +1557,7 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
}
static void
@ -1617,7 +1566,7 @@ hsw_vebox_irq_disable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~0);
gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
}
static void
@ -1762,14 +1711,19 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine)
static void cleanup_status_page(struct intel_engine_cs *engine)
{
struct i915_vma *vma;
struct drm_i915_gem_object *obj;
vma = fetch_and_zero(&engine->status_page.vma);
if (!vma)
return;
obj = vma->obj;
i915_vma_unpin(vma);
i915_gem_object_unpin_map(vma->obj);
i915_vma_put(vma);
i915_vma_close(vma);
i915_gem_object_unpin_map(obj);
__i915_gem_object_release_unless_active(obj);
}
static int init_status_page(struct intel_engine_cs *engine)
@ -1777,9 +1731,10 @@ static int init_status_page(struct intel_engine_cs *engine)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags;
void *vaddr;
int ret;
obj = i915_gem_object_create(&engine->i915->drm, 4096);
obj = i915_gem_object_create_internal(engine->i915, 4096);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate status page\n");
return PTR_ERR(obj);
@ -1812,15 +1767,22 @@ static int init_status_page(struct intel_engine_cs *engine)
if (ret)
goto err;
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
goto err_unpin;
}
engine->status_page.vma = vma;
engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
engine->status_page.page_addr =
i915_gem_object_pin_map(obj, I915_MAP_WB);
engine->status_page.page_addr = memset(vaddr, 0, 4096);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
engine->name, i915_ggtt_offset(vma));
return 0;
err_unpin:
i915_vma_unpin(vma);
err:
i915_gem_object_put(obj);
return ret;
@ -1967,7 +1929,11 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
void
intel_ring_free(struct intel_ring *ring)
{
i915_vma_put(ring->vma);
struct drm_i915_gem_object *obj = ring->vma->obj;
i915_vma_close(ring->vma);
__i915_gem_object_release_unless_active(obj);
kfree(ring);
}
@ -1983,14 +1949,13 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
return 0;
if (ce->state) {
ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false);
if (ret)
goto error;
struct i915_vma *vma;
ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
PIN_GLOBAL | PIN_HIGH);
if (ret)
vma = i915_gem_context_pin_legacy(ctx, PIN_HIGH);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error;
}
}
/* The kernel context is only used as a placeholder for flushing the
@ -2037,9 +2002,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
intel_engine_setup_common(engine);
memset(engine->semaphore.sync_seqno, 0,
sizeof(engine->semaphore.sync_seqno));
ret = intel_engine_init_common(engine);
if (ret)
goto error;
@ -2155,7 +2117,9 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
{
struct intel_ring *ring = req->ring;
struct drm_i915_gem_request *target;
int ret;
long timeout;
lockdep_assert_held(&req->i915->drm.struct_mutex);
intel_ring_update_space(ring);
if (ring->space >= bytes)
@ -2185,11 +2149,11 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
if (WARN_ON(&target->ring_link == &ring->request_list))
return -ENOSPC;
ret = i915_wait_request(target,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
NULL, NO_WAITBOOST);
if (ret)
return ret;
timeout = i915_wait_request(target,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0)
return timeout;
i915_gem_request_retire_upto(target);
@ -2618,9 +2582,22 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->init_hw = init_ring_common;
engine->reset_hw = reset_ring_common;
engine->emit_request = i9xx_emit_request;
if (i915.semaphores)
engine->emit_request = gen6_sema_emit_request;
engine->emit_breadcrumb = i9xx_emit_breadcrumb;
engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
if (i915.semaphores) {
int num_rings;
engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
if (INTEL_GEN(dev_priv) >= 8) {
engine->emit_breadcrumb_sz += num_rings * 6;
} else {
engine->emit_breadcrumb_sz += num_rings * 3;
if (num_rings & 1)
engine->emit_breadcrumb_sz++;
}
}
engine->submit_request = i9xx_submit_request;
if (INTEL_GEN(dev_priv) >= 8)
@ -2647,10 +2624,18 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) >= 8) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_request = gen8_render_emit_request;
engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
engine->emit_flush = gen8_render_ring_flush;
if (i915.semaphores)
if (i915.semaphores) {
int num_rings;
engine->semaphore.signal = gen8_rcs_signal;
num_rings =
hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
engine->emit_breadcrumb_sz += num_rings * 6;
}
} else if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen7_render_ring_flush;


@ -4,6 +4,7 @@
#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#define I915_CMD_HASH_ORDER 9
@ -157,6 +158,7 @@ struct i915_ctx_workarounds {
};
struct drm_i915_gem_request;
struct intel_render_state;
struct intel_engine_cs {
struct drm_i915_private *i915;
@ -168,7 +170,6 @@ struct intel_engine_cs {
VCS2, /* Keep instances of the same type engine together. */
VECS
} id;
#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
unsigned int exec_id;
enum intel_engine_hw_id {
@ -179,10 +180,12 @@ struct intel_engine_cs {
VCS2_HW
} hw_id;
enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
u64 fence_context;
u32 mmio_base;
unsigned int irq_shift;
struct intel_ring *buffer;
struct intel_timeline *timeline;
struct intel_render_state *render_state;
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
@ -204,7 +207,7 @@ struct intel_engine_cs {
struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
bool irq_posted;
spinlock_t lock; /* protects the lists of requests */
spinlock_t lock; /* protects the lists of requests; irqsafe */
struct rb_root waiters; /* sorted by retirement, priority */
struct rb_root signals; /* sorted by retirement */
struct intel_wait *first_wait; /* oldest waiter by retirement */
@ -252,7 +255,9 @@ struct intel_engine_cs {
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS BIT(2)
int (*emit_request)(struct drm_i915_gem_request *req);
void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
u32 *out);
int emit_breadcrumb_sz;
/* Pass the request to the hardware queue (e.g. directly into
* the legacy ringbuffer or to the end of an execlist).
@ -309,8 +314,6 @@ struct intel_engine_cs {
* ie. transpose of f(x, y)
*/
struct {
u32 sync_seqno[I915_NUM_ENGINES-1];
union {
#define GEN6_SEMAPHORE_LAST VECS_HW
#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1)
@ -327,7 +330,7 @@ struct intel_engine_cs {
/* AKA wait() */
int (*sync_to)(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal);
int (*signal)(struct drm_i915_gem_request *req);
u32 *(*signal)(struct drm_i915_gem_request *req, u32 *out);
} semaphore;
/* Execlists */
@ -343,27 +346,6 @@ struct intel_engine_cs {
bool preempt_wa;
u32 ctx_desc_template;
/**
* List of breadcrumbs associated with GPU requests currently
* outstanding.
*/
struct list_head request_list;
/**
* Seqno of request most recently submitted to request_list.
* Used exclusively by hang checker to avoid grabbing lock while
* inspecting request list.
*/
u32 last_submitted_seqno;
u32 last_pending_seqno;
/* An RCU guarded pointer to the last request. No reference is
* held to the request, users must carefully acquire a reference to
* the request using i915_gem_active_get_rcu(), or hold the
* struct_mutex.
*/
struct i915_gem_active last_request;
struct i915_gem_context *last_context;
struct intel_engine_hangcheck hangcheck;
@ -401,27 +383,6 @@ intel_engine_flag(const struct intel_engine_cs *engine)
return 1 << engine->id;
}
static inline u32
intel_engine_sync_index(struct intel_engine_cs *engine,
struct intel_engine_cs *other)
{
int idx;
/*
* rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
* vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
* bcs -> 0 = vecs, 1 = vcs2. 2 = rcs, 3 = vcs;
* vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
* vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
*/
idx = (other->id - engine->id) - 1;
if (idx < 0)
idx += I915_NUM_ENGINES;
return idx;
}
static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
@ -504,30 +465,23 @@ static inline void intel_ring_advance(struct intel_ring *ring)
*/
}
static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
return value & (ring->size - 1);
u32 offset = addr - ring->vaddr;
return offset & (ring->size - 1);
}
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);
void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
static inline int intel_engine_idle(struct intel_engine_cs *engine,
unsigned int flags)
{
/* Wait upon the last request to be completed */
return i915_gem_active_wait_unlocked(&engine->last_request,
flags, NULL, NULL);
}
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
@ -542,6 +496,18 @@ static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}
static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
/* We are only peeking at the tail of the submit queue (and not the
* queue itself) in order to gain a hint as to the current active
* state of the engine. Callers are not expected to be taking
* engine->timeline->lock, nor are they expected to be concerned
* with serialising this hint with anything, so document it as
* a hint and nothing more.
*/
return READ_ONCE(engine->timeline->last_submitted_seqno);
}
int init_workarounds_ring(struct intel_engine_cs *engine);
void intel_engine_get_instdone(struct intel_engine_cs *engine,
@ -615,9 +581,4 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_kick_waiters(struct drm_i915_private *i915);
unsigned int intel_kick_signalers(struct drm_i915_private *i915);
static inline bool intel_engine_is_active(struct intel_engine_cs *engine)
{
return i915_gem_active_isset(&engine->last_request);
}
#endif /* _INTEL_RINGBUFFER_H_ */


@ -330,7 +330,7 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
* sure vgacon can keep working normally without triggering interrupts
* and error messages.
*/
if (power_well->data == SKL_DISP_PW_2) {
if (power_well->id == SKL_DISP_PW_2) {
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(pdev, VGA_RSRC_LEGACY_IO);
@ -343,7 +343,7 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
if (power_well->data == SKL_DISP_PW_2)
if (power_well->id == SKL_DISP_PW_2)
gen8_irq_power_well_pre_disable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}
@ -658,7 +658,7 @@ static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
enum skl_disp_power_wells power_well_id = power_well->data;
enum skl_disp_power_wells power_well_id = power_well->id;
u32 val;
u32 mask;
@ -703,7 +703,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
fuse_status = I915_READ(SKL_FUSE_STATUS);
switch (power_well->data) {
switch (power_well->id) {
case SKL_DISP_PW_1:
if (intel_wait_for_register(dev_priv,
SKL_FUSE_STATUS,
@ -727,13 +727,13 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
case SKL_DISP_PW_MISC_IO:
break;
default:
WARN(1, "Unknown power well %lu\n", power_well->data);
WARN(1, "Unknown power well %lu\n", power_well->id);
return;
}
req_mask = SKL_POWER_WELL_REQ(power_well->data);
req_mask = SKL_POWER_WELL_REQ(power_well->id);
enable_requested = tmp & req_mask;
state_mask = SKL_POWER_WELL_STATE(power_well->data);
state_mask = SKL_POWER_WELL_STATE(power_well->id);
is_enabled = tmp & state_mask;
if (!enable && enable_requested)
@ -769,14 +769,14 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
power_well->name, enable ? "enable" : "disable");
if (check_fuse_status) {
if (power_well->data == SKL_DISP_PW_1) {
if (power_well->id == SKL_DISP_PW_1) {
if (intel_wait_for_register(dev_priv,
SKL_FUSE_STATUS,
SKL_FUSE_PG1_DIST_STATUS,
SKL_FUSE_PG1_DIST_STATUS,
1))
DRM_ERROR("PG1 distributing status timeout\n");
} else if (power_well->data == SKL_DISP_PW_2) {
} else if (power_well->id == SKL_DISP_PW_2) {
if (intel_wait_for_register(dev_priv,
SKL_FUSE_STATUS,
SKL_FUSE_PG2_DIST_STATUS,
@ -818,8 +818,8 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
SKL_POWER_WELL_STATE(power_well->data);
uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
SKL_POWER_WELL_STATE(power_well->id);
return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}
@ -845,45 +845,22 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv,
skl_set_power_well(dev_priv, power_well, false);
}
static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well)
{
enum skl_disp_power_wells power_well_id = power_well->data;
return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0;
}
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
enum skl_disp_power_wells power_well_id = power_well->data;
struct i915_power_well *cmn_a_well = NULL;
if (power_well_id == BXT_DPIO_CMN_BC) {
/*
* We need to copy the GRC calibration value from the eDP PHY,
* so make sure it's powered up.
*/
cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
intel_power_well_get(dev_priv, cmn_a_well);
}
bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well));
if (cmn_a_well)
intel_power_well_put(dev_priv, cmn_a_well);
bxt_ddi_phy_init(dev_priv, power_well->data);
}
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well));
bxt_ddi_phy_uninit(dev_priv, power_well->data);
}
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
return bxt_ddi_phy_is_enabled(dev_priv,
bxt_power_well_to_phy(power_well));
return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
}
static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
@ -902,13 +879,11 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
if (power_well->count > 0)
bxt_ddi_phy_verify_state(dev_priv,
bxt_power_well_to_phy(power_well));
bxt_ddi_phy_verify_state(dev_priv, power_well->data);
power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
if (power_well->count > 0)
bxt_ddi_phy_verify_state(dev_priv,
bxt_power_well_to_phy(power_well));
bxt_ddi_phy_verify_state(dev_priv, power_well->data);
}
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
@ -932,7 +907,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
WARN_ON(dev_priv->cdclk_freq !=
dev_priv->display.get_display_clock_speed(&dev_priv->drm));
dev_priv->display.get_display_clock_speed(dev_priv));
gen9_assert_dbuf_enabled(dev_priv);
@ -975,7 +950,7 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
enum punit_power_well power_well_id = power_well->data;
enum punit_power_well power_well_id = power_well->id;
u32 mask;
u32 state;
u32 ctrl;
@ -1029,7 +1004,7 @@ static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
int power_well_id = power_well->data;
int power_well_id = power_well->id;
bool enabled = false;
u32 mask;
u32 state;
@ -1138,13 +1113,15 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
intel_power_sequencer_reset(dev_priv);
intel_hpd_poll_init(dev_priv);
/* Prevent us from re-enabling polling on accident in late suspend */
if (!dev_priv->drm.dev->power.is_suspended)
intel_hpd_poll_init(dev_priv);
}
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
vlv_set_power_well(dev_priv, power_well, true);
@ -1154,7 +1131,7 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
vlv_display_power_well_deinit(dev_priv);
@ -1164,7 +1141,7 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
@ -1190,7 +1167,7 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
{
enum pipe pipe;
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
for_each_pipe(dev_priv, pipe)
assert_pll_disabled(dev_priv, pipe);
@ -1213,7 +1190,7 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
struct i915_power_well *power_well;
power_well = &power_domains->power_wells[i];
if (power_well->data == power_well_id)
if (power_well->id == power_well_id)
return power_well;
}
@ -1337,10 +1314,10 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
enum pipe pipe;
uint32_t tmp;
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
pipe = PIPE_A;
phy = DPIO_PHY0;
} else {
@ -1368,7 +1345,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
@ -1399,10 +1376,10 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
{
enum dpio_phy phy;
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
phy = DPIO_PHY0;
assert_pll_disabled(dev_priv, PIPE_A);
assert_pll_disabled(dev_priv, PIPE_B);
@ -1551,7 +1528,7 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
enum pipe pipe = power_well->data;
enum pipe pipe = power_well->id;
bool enabled;
u32 state, ctrl;
@ -1581,7 +1558,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well,
bool enable)
{
enum pipe pipe = power_well->data;
enum pipe pipe = power_well->id;
u32 state;
u32 ctrl;
@ -1614,7 +1591,7 @@ out:
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PIPE_A);
WARN_ON_ONCE(power_well->id != PIPE_A);
chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}
@ -1622,7 +1599,7 @@ static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PIPE_A);
WARN_ON_ONCE(power_well->id != PIPE_A);
chv_set_pipe_power_well(dev_priv, power_well, true);
@ -1632,7 +1609,7 @@ static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PIPE_A);
WARN_ON_ONCE(power_well->id != PIPE_A);
vlv_display_power_well_deinit(dev_priv);
@ -1976,12 +1953,12 @@ static struct i915_power_well vlv_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.data = PUNIT_POWER_WELL_ALWAYS_ON,
.id = PUNIT_POWER_WELL_ALWAYS_ON,
},
{
.name = "display",
.domains = VLV_DISPLAY_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DISP2D,
.id = PUNIT_POWER_WELL_DISP2D,
.ops = &vlv_display_power_well_ops,
},
{
@ -1991,7 +1968,7 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
},
{
.name = "dpio-tx-b-23",
@ -2000,7 +1977,7 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
},
{
.name = "dpio-tx-c-01",
@ -2009,7 +1986,7 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
},
{
.name = "dpio-tx-c-23",
@ -2018,12 +1995,12 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
},
{
.name = "dpio-common",
.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &vlv_dpio_cmn_power_well_ops,
},
};
@@ -2043,19 +2020,19 @@ static struct i915_power_well chv_power_wells[] = {
* required for any pipe to work.
*/
.domains = CHV_DISPLAY_POWER_DOMAINS,
.data = PIPE_A,
.id = PIPE_A,
.ops = &chv_pipe_power_well_ops,
},
{
.name = "dpio-common-bc",
.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &chv_dpio_cmn_power_well_ops,
},
{
.name = "dpio-common-d",
.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_D,
.id = PUNIT_POWER_WELL_DPIO_CMN_D,
.ops = &chv_dpio_cmn_power_well_ops,
},
};
@@ -2078,57 +2055,57 @@ static struct i915_power_well skl_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.data = SKL_DISP_PW_ALWAYS_ON,
.id = SKL_DISP_PW_ALWAYS_ON,
},
{
.name = "power well 1",
/* Handled by the DMC firmware */
.domains = 0,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_1,
.id = SKL_DISP_PW_1,
},
{
.name = "MISC IO power well",
/* Handled by the DMC firmware */
.domains = 0,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_MISC_IO,
.id = SKL_DISP_PW_MISC_IO,
},
{
.name = "DC off",
.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
.data = SKL_DISP_PW_DC_OFF,
.id = SKL_DISP_PW_DC_OFF,
},
{
.name = "power well 2",
.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_2,
.id = SKL_DISP_PW_2,
},
{
.name = "DDI A/E power well",
.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_DDI_A_E,
.id = SKL_DISP_PW_DDI_A_E,
},
{
.name = "DDI B power well",
.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_DDI_B,
.id = SKL_DISP_PW_DDI_B,
},
{
.name = "DDI C power well",
.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_DDI_C,
.id = SKL_DISP_PW_DDI_C,
},
{
.name = "DDI D power well",
.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_DDI_D,
.id = SKL_DISP_PW_DDI_D,
},
};
@@ -2143,31 +2120,33 @@ static struct i915_power_well bxt_power_wells[] = {
.name = "power well 1",
.domains = 0,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_1,
.id = SKL_DISP_PW_1,
},
{
.name = "DC off",
.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
.data = SKL_DISP_PW_DC_OFF,
.id = SKL_DISP_PW_DC_OFF,
},
{
.name = "power well 2",
.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_2,
.id = SKL_DISP_PW_2,
},
{
.name = "dpio-common-a",
.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
.data = BXT_DPIO_CMN_A,
.id = BXT_DPIO_CMN_A,
.data = DPIO_PHY1,
},
{
.name = "dpio-common-bc",
.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
.data = BXT_DPIO_CMN_BC,
.id = BXT_DPIO_CMN_BC,
.data = DPIO_PHY0,
},
};
@@ -2736,8 +2715,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
struct device *kdev = &pdev->dev;
assert_rpm_wakelock_held(dev_priv);
if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
atomic_inc(&dev_priv->pm.atomic_seq);
atomic_dec(&dev_priv->pm.wakeref_count);
pm_runtime_mark_last_busy(kdev);
pm_runtime_put_autosuspend(kdev);
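
The power-well tables above now key each well off a dedicated ->id, leaving ->data free for per-platform payload (the BXT wells carry their DPIO PHY number there). A minimal lookup sketch under that assumption; the helper name and the power_wells/power_well_count bookkeeping in struct i915_power_domains are assumed for illustration, not quoted from this patch:

	/* Sketch only: walk the platform's well table and match on ->id. */
	static struct i915_power_well *
	lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id)
	{
		struct i915_power_domains *power_domains = &dev_priv->power_domains;
		int i;

		for (i = 0; i < power_domains->power_well_count; i++) {
			struct i915_power_well *power_well =
				&power_domains->power_wells[i];

			if (power_well->id == power_well_id)
				return power_well;
		}

		return NULL;
	}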

View file

@@ -1472,7 +1472,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
temp &= ~SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -1509,7 +1509,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
intel_sdvo_write_sdvox(intel_sdvo, temp);
for (i = 0; i < 2; i++)
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
/* Warn if the device reported failure to sync.
@@ -2411,10 +2411,10 @@ static void
intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *connector)
{
struct drm_device *dev = connector->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(connector->base.base.dev);
intel_attach_force_audio_property(&connector->base.base);
if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
if (INTEL_GEN(dev_priv) >= 4 && IS_MOBILE(dev_priv)) {
intel_attach_broadcast_rgb_property(&connector->base.base);
intel_sdvo->color_range_auto = true;
}
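
The converted call sites above imply that the vblank-wait helpers now take the device private and a pipe instead of a struct drm_device. A prototype sketch inferred from those calls (not quoted from the headers):

	/* Inferred prototypes (sketch only): */
	void intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe);
	void intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv,
					     enum pipe pipe);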

View file

@@ -1042,10 +1042,10 @@ static uint32_t skl_plane_formats[] = {
DRM_FORMAT_VYUY,
};
int
intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
struct intel_plane *
intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = NULL;
struct intel_plane_state *state = NULL;
unsigned long possible_crtcs;
@@ -1054,9 +1054,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
int num_plane_formats;
int ret;
if (INTEL_INFO(dev)->gen < 5)
return -ENODEV;
intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
if (!intel_plane) {
ret = -ENOMEM;
@@ -1070,11 +1067,42 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
}
intel_plane->base.state = &state->base;
switch (INTEL_INFO(dev)->gen) {
case 5:
case 6:
if (INTEL_GEN(dev_priv) >= 9) {
intel_plane->can_scale = true;
state->scaler_id = -1;
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
plane_formats = skl_plane_formats;
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_plane->can_scale = false;
intel_plane->max_downscale = 1;
intel_plane->update_plane = vlv_update_plane;
intel_plane->disable_plane = vlv_disable_plane;
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
} else if (INTEL_GEN(dev_priv) >= 7) {
if (IS_IVYBRIDGE(dev_priv)) {
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
} else {
intel_plane->can_scale = false;
intel_plane->max_downscale = 1;
}
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
} else {
intel_plane->can_scale = true;
intel_plane->max_downscale = 16;
intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane;
@@ -1085,45 +1113,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
plane_formats = ilk_plane_formats;
num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
}
break;
case 7:
case 8:
if (IS_IVYBRIDGE(dev_priv)) {
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
} else {
intel_plane->can_scale = false;
intel_plane->max_downscale = 1;
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_plane->update_plane = vlv_update_plane;
intel_plane->disable_plane = vlv_disable_plane;
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
} else {
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
}
break;
case 9:
intel_plane->can_scale = true;
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
state->scaler_id = -1;
plane_formats = skl_plane_formats;
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
break;
default:
MISSING_CASE(INTEL_INFO(dev)->gen);
ret = -ENODEV;
goto fail;
}
if (INTEL_GEN(dev_priv) >= 9) {
@@ -1142,15 +1131,15 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
possible_crtcs = (1 << pipe);
if (INTEL_INFO(dev)->gen >= 9)
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
if (INTEL_GEN(dev_priv) >= 9)
ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
possible_crtcs, &intel_plane_funcs,
plane_formats, num_plane_formats,
DRM_PLANE_TYPE_OVERLAY,
"plane %d%c", plane + 2, pipe_name(pipe));
else
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
possible_crtcs, &intel_plane_funcs,
plane_formats, num_plane_formats,
DRM_PLANE_TYPE_OVERLAY,
"sprite %c", sprite_name(pipe, plane));
@@ -1163,11 +1152,11 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
return 0;
return intel_plane;
fail:
kfree(state);
kfree(intel_plane);
return ret;
return ERR_PTR(ret);
}
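
With intel_sprite_plane_create() returning the plane or an ERR_PTR() rather than an errno, a caller is expected to test the returned pointer. A minimal usage sketch with hypothetical variable names and error path:

	/* Usage sketch (hypothetical caller); IS_ERR()/PTR_ERR() come from <linux/err.h>. */
	struct intel_plane *plane;

	plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
	if (IS_ERR(plane)) {
		ret = PTR_ERR(plane);	/* propagate the constructor's error */
		goto err;		/* hypothetical cleanup label */
	}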

View file

@@ -856,7 +856,7 @@ intel_enable_tv(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(dev);
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_wait_for_vblank(encoder->base.dev,
intel_wait_for_vblank(dev_priv,
to_intel_crtc(encoder->base.crtc)->pipe);
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
@@ -1238,7 +1238,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
I915_WRITE(TV_DAC, tv_dac);
POSTING_READ(TV_DAC);
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
type = -1;
tv_dac = I915_READ(TV_DAC);
@@ -1268,7 +1268,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
POSTING_READ(TV_CTL);
/* For unknown reasons the hw barfs if we don't do this vblank wait. */
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {