Revert "drm/i915: Drop ring->lazy_request"

With multiple rings generating requests independently, the outstanding
requests must also be tracked independently.

Reported-by: Wang Jinjin <jinjin.wang@intel.com>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=30380
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2010-09-28 10:07:56 +01:00
Commit: a56ba56c27 (parent ced270fa89)

 2 files changed, 35 insertions(+), 13 deletions(-)
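For orientation, below is a minimal, self-contained sketch of the per-ring lazy-request tracking this revert restores. It is not part of the patch: struct ring, next_request_seqno and add_request are illustrative stand-ins for the driver's types, and the bodies are simplified. The point it models is that each ring carries its own outstanding_lazy_request flag, set when a seqno is handed out before the request is emitted and cleared once the request is actually added, so a wait on one ring no longer depends on a flag shared with the others.

/*
 * Minimal userspace model of the per-ring lazy-request idea restored by
 * this revert.  Types and names (struct ring, next_request_seqno,
 * add_request) are illustrative stand-ins, not the driver's real API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
	const char *name;
	bool outstanding_lazy_request;	/* seqno handed out, request not yet emitted */
};

static uint32_t next_seqno = 1;		/* stands in for dev_priv->next_seqno */

/* Report the seqno the next request on this ring will use, and remember
 * that this ring now owes a real request emission. */
static uint32_t next_request_seqno(struct ring *ring)
{
	ring->outstanding_lazy_request = true;
	return next_seqno;
}

/* Actually emit the request: consume the seqno and clear only this
 * ring's flag, leaving other rings' lazy requests untouched. */
static uint32_t add_request(struct ring *ring)
{
	uint32_t seqno = next_seqno++;
	ring->outstanding_lazy_request = false;
	return seqno;
}

int main(void)
{
	struct ring render = { "render", false };
	struct ring bsd = { "bsd", false };

	uint32_t seqno = next_request_seqno(&render);

	/* A wait on the bsd ring consults its own flag; the render ring's
	 * outstanding request is not flushed by mistake. */
	if (bsd.outstanding_lazy_request)
		add_request(&bsd);

	printf("%s: lazy seqno %u, outstanding=%d\n",
	       render.name, (unsigned)seqno, render.outstanding_lazy_request);
	return 0;
}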

--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c

@@ -1545,12 +1545,23 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
 	obj_priv->pages = NULL;
 }
 
+static uint32_t
+i915_gem_next_request_seqno(struct drm_device *dev,
+			    struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	ring->outstanding_lazy_request = true;
+	return dev_priv->next_seqno;
+}
+
 static void
 i915_gem_object_move_to_active(struct drm_gem_object *obj,
 			       struct intel_ring_buffer *ring)
 {
-	struct drm_i915_private *dev_priv = obj->dev->dev_private;
+	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
 
 	BUG_ON(ring == NULL);
 	obj_priv->ring = ring;
@@ -1563,7 +1574,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj,
 
 	/* Move from whatever list we were on to the tail of execution. */
 	list_move_tail(&obj_priv->list, &ring->active_list);
-	obj_priv->last_rendering_seqno = dev_priv->next_seqno;
+	obj_priv->last_rendering_seqno = seqno;
 }
 
 static void
@@ -1686,6 +1697,7 @@ i915_add_request(struct drm_device *dev,
 	}
 
 	seqno = ring->add_request(dev, ring, 0);
+	ring->outstanding_lazy_request = false;
 
 	request->seqno = seqno;
 	request->ring = ring;
@@ -1930,11 +1942,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 	if (atomic_read(&dev_priv->mm.wedged))
 		return -EAGAIN;
 
-	if (seqno == dev_priv->next_seqno) {
+	if (ring->outstanding_lazy_request) {
 		seqno = i915_add_request(dev, NULL, NULL, ring);
 		if (seqno == 0)
 			return -ENOMEM;
 	}
+	BUG_ON(seqno == dev_priv->next_seqno);
 
 	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
 		if (HAS_PCH_SPLIT(dev))
@@ -2139,12 +2152,21 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	return ret;
 }
 
+static int i915_ring_idle(struct drm_device *dev,
+			  struct intel_ring_buffer *ring)
+{
+	i915_gem_flush_ring(dev, NULL, ring,
+			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	return i915_wait_request(dev,
+				 i915_gem_next_request_seqno(dev, ring),
+				 ring);
+}
+
 int
 i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool lists_empty;
-	u32 seqno;
 	int ret;
 
 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
@@ -2155,18 +2177,12 @@ i915_gpu_idle(struct drm_device *dev)
 		return 0;
 
 	/* Flush everything onto the inactive list. */
-	seqno = dev_priv->next_seqno;
-	i915_gem_flush_ring(dev, NULL, &dev_priv->render_ring,
-			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-	ret = i915_wait_request(dev, seqno, &dev_priv->render_ring);
+	ret = i915_ring_idle(dev, &dev_priv->render_ring);
 	if (ret)
 		return ret;
 
 	if (HAS_BSD(dev)) {
-		seqno = dev_priv->next_seqno;
-		i915_gem_flush_ring(dev, NULL, &dev_priv->bsd_ring,
-				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-		ret = i915_wait_request(dev, seqno, &dev_priv->bsd_ring);
+		ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
 		if (ret)
 			return ret;
 	}
@@ -3938,6 +3954,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 #endif
 	}
 
 	i915_add_request(dev, file_priv, request, ring);
 	request = NULL;

--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h

@@ -81,6 +81,11 @@ struct intel_ring_buffer {
 	 */
 	struct list_head request_list;
 
+	/**
+	 * Do we have some not yet emitted requests outstanding?
+	 */
+	bool outstanding_lazy_request;
+
 	wait_queue_head_t irq_queue;
 	drm_local_map_t map;
 };